]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
Fix masking of PC lower bits when doing exception returns
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
1497c961 31
2ef6175a
RH
32#include "exec/helper-proto.h"
33#include "exec/helper-gen.h"
2c0262af 34
a7e30d84 35#include "trace-tcg.h"
508127e2 36#include "exec/log.h"
a7e30d84
LV
37
38
2b51668f
PM
39#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
40#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 41/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 42#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 43#define ENABLE_ARCH_5J 0
2b51668f
PM
44#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
45#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
46#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
47#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
48#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 49
86753403 50#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 51
f570c61e 52#include "translate.h"
e12ce78d 53
b5ff1b31
FB
54#if defined(CONFIG_USER_ONLY)
55#define IS_USER(s) 1
56#else
57#define IS_USER(s) (s->user)
58#endif
59
1bcea73e 60TCGv_env cpu_env;
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
78bcaa3e
RH
64TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65TCGv_i64 cpu_exclusive_addr;
66TCGv_i64 cpu_exclusive_val;
426f5abc 67#ifdef CONFIG_USER_ONLY
78bcaa3e
RH
68TCGv_i64 cpu_exclusive_test;
69TCGv_i32 cpu_exclusive_info;
426f5abc 70#endif
ad69471c 71
b26eefb6 72/* FIXME: These should be removed. */
39d5492a 73static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 74static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 75
022c62cb 76#include "exec/gen-icount.h"
2e70f6ef 77
155c3eac
FN
78static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    /* cpu_env lives in a fixed host register (TCG_AREG0). */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    /* Expose the 16 AArch32 core registers as TCG globals backed by
     * CPUARMState.regs[], so generated code reads/writes env directly. */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* The four condition flags are kept in separate env fields. */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    /* State for exclusive (LDREX/STREX) monitor emulation. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    /* Also set up the AArch64 translator's globals. */
    a64_translate_init();
}
113
579d21cc
PM
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:
        /* Stage-2 indexes are never used for instruction fetch/decode,
         * so reaching here indicates a translator bug. */
    default:
        g_assert_not_reached();
    }
}
135
/* Return a new temporary loaded from the given byte offset in env. */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

/* Load a named CPUARMState field into a fresh temporary. */
#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Store var at the given byte offset in env; var is freed (marked dead). */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

/* Store var into a named CPUARMState field; var is consumed. */
#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 153
b26eefb6 154/* Set a variable to the value of a CPU register. */
39d5492a 155static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
156{
157 if (reg == 15) {
158 uint32_t addr;
b90372ad 159 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
160 if (s->thumb)
161 addr = (long)s->pc + 2;
162 else
163 addr = (long)s->pc + 4;
164 tcg_gen_movi_i32(var, addr);
165 } else {
155c3eac 166 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
167 }
168}
169
170/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 171static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 172{
39d5492a 173 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
174 load_reg_var(s, tmp, reg);
175 return tmp;
176}
177
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        /* A PC write ends this translation block so execution resumes
         * at the new PC. */
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
194
b26eefb6 195/* Value extensions. */
86831435
PB
196#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
198#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
200
1497c961
PB
201#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 203
b26eefb6 204
/* Write the bits of CPSR selected by mask from var, via the
 * cpsr_write helper (which handles banked-register switching etc.). */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
/* Raise a QEMU-internal exception (one with no architectural
 * syndrome/target-EL semantics, e.g. for semihosting or debug). */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

/* Raise an architectural exception @excp with the given syndrome
 * register value, to be taken to exception level @target_el. */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
236
50225ad0
PM
237static void gen_ss_advance(DisasContext *s)
238{
239 /* If the singlestep state is Active-not-pending, advance to
240 * Active-pending.
241 */
242 if (s->ss_active) {
243 s->pstate_ss = 0;
244 gen_helper_clear_pstate_ss(cpu_env);
245 }
246}
247
248static void gen_step_complete_exception(DisasContext *s)
249{
250 /* We just completed step of an insn. Move from Active-not-pending
251 * to Active-pending, and then also take the swstep exception.
252 * This corresponds to making the (IMPDEF) choice to prioritize
253 * swstep exceptions over asynchronous exceptions taken to an exception
254 * level where debug is disabled. This choice has the advantage that
255 * we do not need to maintain internal state corresponding to the
256 * ISV/EX syndrome bits between completion of the step and generation
257 * of the exception, and our syndrome information is always correct.
258 */
259 gen_ss_advance(s);
73710361
GB
260 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
261 default_exception_el(s));
50225ad0
PM
262 s->is_jmp = DISAS_EXC;
263}
264
/* Dual 16x16->32 signed multiply: on return, a = lo(a)*lo(b) and
 * b = hi(a)*hi(b), both as signed 32-bit products. */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Low halves, sign-extended, multiplied into tmp1. */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* High halves (arithmetic shift sign-extends) multiplied into b. */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Move odd bytes down ... */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    /* ... even bytes up, then merge. */
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
299
/* Unsigned bitfield extract: var = (var >> shift) & mask. */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract: var = sign_extend(var[shift +: width]). */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend from bit (width-1) using the classic
         * "xor with sign bit then subtract it" trick. */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
322
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    /* Result is written back into a; caller keeps ownership of it. */
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
350
/* 32x32->64 unsigned multiply.  Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* mulu2 yields the full 64-bit product as a (lo, hi) pair. */
    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* 32x32->64 signed multiply.  Marks inputs as dead. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
387
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Clearing bit 15 of both operands stops any carry from the low
     * halfword propagating into the high halfword; the xor with tmp
     * restores the correct bit-15 sum afterwards. */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
417
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv_i32 var)
{
    /* NF holds the value itself (sign bit = N); ZF is zero iff Z. */
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  Does not update flags. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  (ARM subtract-with-carry semantics.) */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
452
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 gives us the 32-bit sum in NF and the carry-out in CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had the
     * same sign and the result's sign differs. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first add the incoming carry, then t1,
         * accumulating the carry-out into CF each time. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the addition at 64 bits and split sum/carry. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry is "no borrow": C = (t0 >= t1) unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1). */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* SBC is ADC with the second operand inverted. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
518
/* Variable shifts with ARM semantics: only the low byte of the shift
 * amount is used, and amounts >= 32 yield 0 (handled via movcond since
 * a host shift by >= 32 would be undefined). */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/* Variable arithmetic shift right: amounts >= 32 behave like 31
 * (the result is all copies of the sign bit). */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* dest = |src| as a signed 32-bit value (branch-free via movcond). */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 559
/* Copy the shifter carry-out into CF: bit (shift) of var, or bit 0
 * when shift == 0. */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* For shift == 31 the shift already left a single bit. */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.
 * shiftop encodes LSL/LSR/ASR/ROR; if flags is set, CF gets the
 * shifter carry-out.  An encoded shift of 0 means LSR/ASR by 32,
 * or RRX (rotate right through carry) for the ROR case. */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* Encoded 0 means LSR #32: result 0, CF = bit 31. */
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;         /* encoded 0 means ASR #32 */
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;         /* sari by 31 replicates the sign bit */
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* RRX: rotate right one bit through the carry flag. */
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
621
39d5492a
PM
622static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
623 TCGv_i32 shift, int flags)
8984bd2e
PB
624{
625 if (flags) {
626 switch (shiftop) {
9ef39277
BS
627 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
628 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
629 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
630 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
631 }
632 } else {
633 switch (shiftop) {
365af80e
AJ
634 case 0:
635 gen_shl(var, var, shift);
636 break;
637 case 1:
638 gen_shr(var, var, shift);
639 break;
640 case 2:
641 gen_sar(var, var, shift);
642 break;
f669df27
AJ
643 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
644 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
645 }
646 }
7d1b0095 647 tcg_temp_free_i32(shift);
8984bd2e
PB
648}
649
6ddbc6e4
PB
650#define PAS_OP(pfx) \
651 switch (op2) { \
652 case 0: gen_pas_helper(glue(pfx,add16)); break; \
653 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
654 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
655 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
656 case 4: gen_pas_helper(glue(pfx,add8)); break; \
657 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
658 }
39d5492a 659static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 660{
a7812ae4 661 TCGv_ptr tmp;
6ddbc6e4
PB
662
663 switch (op1) {
664#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
665 case 1:
a7812ae4 666 tmp = tcg_temp_new_ptr();
0ecb72a5 667 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 668 PAS_OP(s)
b75263d6 669 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
670 break;
671 case 5:
a7812ae4 672 tmp = tcg_temp_new_ptr();
0ecb72a5 673 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 674 PAS_OP(u)
b75263d6 675 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
676 break;
677#undef gen_pas_helper
678#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
679 case 2:
680 PAS_OP(q);
681 break;
682 case 3:
683 PAS_OP(sh);
684 break;
685 case 6:
686 PAS_OP(uq);
687 break;
688 case 7:
689 PAS_OP(uh);
690 break;
691#undef gen_pas_helper
692 }
693}
9ee6e8bb
PB
694#undef PAS_OP
695
6ddbc6e4
PB
696/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
697#define PAS_OP(pfx) \
ed89a2f1 698 switch (op1) { \
6ddbc6e4
PB
699 case 0: gen_pas_helper(glue(pfx,add8)); break; \
700 case 1: gen_pas_helper(glue(pfx,add16)); break; \
701 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
702 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
703 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
704 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
705 }
39d5492a 706static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 707{
a7812ae4 708 TCGv_ptr tmp;
6ddbc6e4 709
ed89a2f1 710 switch (op2) {
6ddbc6e4
PB
711#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
712 case 0:
a7812ae4 713 tmp = tcg_temp_new_ptr();
0ecb72a5 714 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 715 PAS_OP(s)
b75263d6 716 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
717 break;
718 case 4:
a7812ae4 719 tmp = tcg_temp_new_ptr();
0ecb72a5 720 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 721 PAS_OP(u)
b75263d6 722 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
723 break;
724#undef gen_pas_helper
725#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
726 case 1:
727 PAS_OP(q);
728 break;
729 case 2:
730 PAS_OP(sh);
731 break;
732 case 5:
733 PAS_OP(uq);
734 break;
735 case 6:
736 PAS_OP(uh);
737 break;
738#undef gen_pas_helper
739 }
740}
9ee6e8bb
PB
741#undef PAS_OP
742
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 *
 * Fills in cmp with a TCG condition and a value to compare against
 * zero; cmp->value_global records whether the value is one of the
 * global flag variables (must not be freed) or a fresh temporary.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    /* Each pair of cases sets up the "true" (even cc) condition; the
     * odd cc of the pair is handled by the invert step at the end. */
    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
832
/* Release the comparison value if arm_test_cc allocated a temporary. */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

/* Branch to label if the prepared comparison holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

/* Convenience wrapper: branch to label when condition code cc holds. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 852
/* Per data-processing opcode: 1 if the S-bit form sets only N/Z (and
 * shifter C) like a logical op, 0 if it sets all NZCV arithmetically. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 871
d9ba4830
PB
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    /* Only store the new Thumb bit if it actually changes. */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    /* Bit 0 selects the instruction set; the PC itself is aligned. */
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
895
21aeb343
JR
896/* Variant of store_reg which uses branch&exchange logic when storing
897 to r15 in ARM architecture v7 and above. The source must be a temporary
898 and will be marked as dead. */
7dcc1f89 899static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
900{
901 if (reg == 15 && ENABLE_ARCH_7) {
902 gen_bx(s, var);
903 } else {
904 store_reg(s, reg, var);
905 }
906}
907
be5e7a76
DES
908/* Variant of store_reg which uses branch&exchange logic when storing
909 * to r15 in ARM architecture v5T and above. This is used for storing
910 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
911 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 912static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
913{
914 if (reg == 15 && ENABLE_ARCH_5) {
915 gen_bx(s, var);
916 } else {
917 store_reg(s, reg, var);
918 }
919}
920
e334bd31
PB
921#ifdef CONFIG_USER_ONLY
922#define IS_USER_ONLY 1
923#else
924#define IS_USER_ONLY 0
925#endif
926
08307563
PM
927/* Abstractions of "generate code to do a guest load/store for
928 * AArch32", where a vaddr is always 32 bits (and is zero
929 * extended if we're a 64 bit core) and data is also
930 * 32 bits unless specifically doing a 64 bit access.
931 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 932 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
933 */
934#if TARGET_LONG_BITS == 32
935
/* 32-bit-guest-address variants: a guest vaddr fits directly in a TCGv.
 * The BE32_XOR address twiddle implements the legacy BE32 byte-lane
 * swap for sub-word accesses when SCTLR.B is set (system mode only). */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_ld_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_ld_i32(val, addr, index, opc);                          \
}

#define DO_GEN_ST(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_st_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_st_i32(val, addr, index, opc);                          \
}

/* 64-bit load; BE32 mode swaps the two words rather than the address. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

/* 64-bit store; BE32 mode swaps the two words before storing. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
        return;
    }
    tcg_gen_qemu_st_i64(val, addr, index, opc);
}
993
994#else
995
/* As the TARGET_LONG_BITS == 32 variants above, but the host address
 * register is 64-bit, so the 32-bit guest vaddr is zero-extended into
 * a temporary before the access. */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 addr, int index) \
{ \
    TCGMemOp opc = (OPC) | s->be_data; \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
    } \
    tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
    tcg_temp_free(addr64); \
}

#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 addr, int index) \
{ \
    TCGMemOp opc = (OPC) | s->be_data; \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
    } \
    tcg_gen_qemu_st_i32(val, addr64, index, opc); \
    tcg_temp_free(addr64); \
}

/* 64-bit load with zero-extended address; BE32 swaps words by rotation. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
    tcg_temp_free(addr64);
}

/* 64-bit store with zero-extended address; BE32 swap done on a copy so
 * the caller's VAL is preserved. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
        tcg_temp_free(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr64, index, opc);
    }
    tcg_temp_free(addr64);
}
1059
1060#endif
1061
e334bd31
PB
/* Instantiate the aa32 load/store helpers for each access size.  The
 * third argument is the BE32 address XOR (see DO_GEN_LD/DO_GEN_ST). */
DO_GEN_LD(8s, MO_SB, 3)
DO_GEN_LD(8u, MO_UB, 3)
DO_GEN_LD(16s, MO_SW, 2)
DO_GEN_LD(16u, MO_UW, 2)
DO_GEN_LD(32u, MO_UL, 0)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
DO_GEN_ST(8, MO_UB, 3)
DO_GEN_ST(16, MO_UW, 2)
DO_GEN_ST(32, MO_UL, 0)
08307563 1073
/* Set the emulated PC (r15) to a known immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
1078
37e6456e
PM
/* Generate code for an HVC (hypervisor call) insn with 16-bit imm16. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

/* Generate code for an SMC (secure monitor call) insn. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1111
d4a2dc67
PM
/* Write the current IT-block (conditional execution) state back to
 * env->condexec_bits, packed as (cond << 4) | (mask >> 1).  No-op when
 * not inside an IT block (condexec_mask == 0). */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
1122
/* Raise an internal (QEMU-only) exception for the current insn.
 * OFFSET is how many bytes to wind s->pc back so it points at the
 * insn that caused the exception. */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

/* Raise an architectural exception EXCP for the current insn, with
 * syndrome SYN, targeting exception level TARGET_EL.  OFFSET as above. */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}
1139
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 is never part of the architectural PC value, so mask it. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1146
/* Apply the addressing-mode offset of a word/byte load/store insn to
 * VAR (the base address).  Bit 25 selects immediate vs shifted-register
 * offset; bit 23 selects add vs subtract. */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;       /* shift amount */
        shiftop = (insn >> 5) & 3;        /* LSL/LSR/ASR/ROR */
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1174
/* As gen_add_data_offset but for the halfword/doubleword ("misc")
 * load/store encodings: bit 22 selects an 8-bit immediate split across
 * insn[11:8] and insn[3:0], vs a plain register offset.  EXTRA is an
 * additional constant added to the offset. */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1202
5aaebd13
PM
1203static TCGv_ptr get_fpstatus_ptr(int neon)
1204{
1205 TCGv_ptr statusptr = tcg_temp_new_ptr();
1206 int offset;
1207 if (neon) {
0ecb72a5 1208 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1209 } else {
0ecb72a5 1210 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1211 }
1212 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1213 return statusptr;
1214}
1215
4373f3ce
PB
/* Emit a VFP two-operand arithmetic op: F0 = F0 <name> F1, using the
 * VFP fp_status (not the Neon standard one) for rounding state. */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1234
605a6aed
PM
1235static inline void gen_vfp_F1_mul(int dp)
1236{
1237 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1238 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1239 if (dp) {
ae1857ec 1240 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1241 } else {
ae1857ec 1242 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1243 }
ae1857ec 1244 tcg_temp_free_ptr(fpst);
605a6aed
PM
1245}
1246
1247static inline void gen_vfp_F1_neg(int dp)
1248{
1249 /* Like gen_vfp_neg() but put result in F1 */
1250 if (dp) {
1251 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1252 } else {
1253 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1254 }
1255}
1256
4373f3ce
PB
1257static inline void gen_vfp_abs(int dp)
1258{
1259 if (dp)
1260 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1261 else
1262 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1263}
1264
1265static inline void gen_vfp_neg(int dp)
1266{
1267 if (dp)
1268 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1269 else
1270 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1271}
1272
1273static inline void gen_vfp_sqrt(int dp)
1274{
1275 if (dp)
1276 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1277 else
1278 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1279}
1280
1281static inline void gen_vfp_cmp(int dp)
1282{
1283 if (dp)
1284 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1285 else
1286 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1287}
1288
1289static inline void gen_vfp_cmpe(int dp)
1290{
1291 if (dp)
1292 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1293 else
1294 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1295}
1296
1297static inline void gen_vfp_F1_ld0(int dp)
1298{
1299 if (dp)
5b340b51 1300 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1301 else
5b340b51 1302 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1303}
1304
5500b06c
PM
/* int -> float conversions: F0 = <name>(F0).  NEON selects the Neon
 * standard fp_status (see get_fpstatus_ptr). */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* float -> int conversions; result always lands in the single-precision
 * scratch register F0s. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* Fixed-point conversions with a SHIFT amount; ROUND selects the
 * "_round_to_zero" helper variant (empty for the default rounding). */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1363
39d5492a 1364static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1365{
08307563 1366 if (dp) {
12dcc321 1367 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1368 } else {
12dcc321 1369 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1370 }
b5ff1b31
FB
1371}
1372
39d5492a 1373static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1374{
08307563 1375 if (dp) {
12dcc321 1376 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1377 } else {
12dcc321 1378 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1379 }
b5ff1b31
FB
1380}
1381
8e96005d
FB
1382static inline long
1383vfp_reg_offset (int dp, int reg)
1384{
1385 if (dp)
1386 return offsetof(CPUARMState, vfp.regs[reg]);
1387 else if (reg & 1) {
1388 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1389 + offsetof(CPU_DoubleU, l.upper);
1390 } else {
1391 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1392 + offsetof(CPU_DoubleU, l.lower);
1393 }
1394}
9ee6e8bb
PB
1395
1396/* Return the offset of a 32-bit piece of a NEON register.
1397 zero is the least significant end of the register. */
1398static inline long
1399neon_reg_offset (int reg, int n)
1400{
1401 int sreg;
1402 sreg = reg * 2 + n;
1403 return vfp_reg_offset(0, sreg);
1404}
1405
/* Load one 32-bit pass of a Neon register into a fresh temp.
 * Caller owns (and must free) the returned temp. */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Store VAR into one 32-bit pass of a Neon register.
 * Consumes (frees) VAR. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* 64-bit whole-register Neon load/store. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1428
4373f3ce
PB
/* TCG has no distinct float load/store ops; alias the "f" names used
 * below to the same-width integer loads/stores. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1433
b7bcbe95
FB
1434static inline void gen_mov_F0_vreg(int dp, int reg)
1435{
1436 if (dp)
4373f3ce 1437 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1438 else
4373f3ce 1439 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1440}
1441
1442static inline void gen_mov_F1_vreg(int dp, int reg)
1443{
1444 if (dp)
4373f3ce 1445 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1446 else
4373f3ce 1447 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1448}
1449
1450static inline void gen_mov_vreg_F0(int dp, int reg)
1451{
1452 if (dp)
4373f3ce 1453 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1454 else
4373f3ce 1455 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1456}
1457
18c9b560
AZ
/* Coprocessor insn bit 20: set for the register-read (load) direction. */
#define ARM_CP_RW_BIT (1 << 20)

/* Copy a 64-bit iwMMXt data register to/from a TCG temp. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load an iwMMXt control register into a fresh 32-bit temp.
 * Caller owns (and must free) the returned temp. */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store VAR into an iwMMXt control register; consumes (frees) VAR. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Move between the M0 working register and wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit logical ops accumulating into M0 (M0 op= wRn), via cpu_V1. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1510
/* Define gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP but the helper also takes cpu_env (it reads/writes
 * CPU state, e.g. flags or saturation). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long ("b"/"w"/"l") variants of an env op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env op: M0 = helper(M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1590
e677137d
PB
/* Set bit 1 of wCon (presumably the MUP "data register updated" flag —
 * TODO confirm against the iwMMXt spec). */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 of wCon (presumably the CUP "control register updated"
 * flag — TODO confirm against the iwMMXt spec). */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Update the N/Z flags in wCASF from the value in M0 (via helper). */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1620
39d5492a
PM
/* Compute the effective address of an iwMMXt load/store into DEST,
 * performing any base-register writeback.  insn bit 24 selects
 * pre- vs post-indexing, bit 23 add vs subtract, bit 21 writeback.
 * Returns 1 if the addressing mode is invalid, 0 on success. */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;    /* base register */
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled <<2 when insn bit 8 is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))   /* writeback */
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;               /* P=0, W=0, U=0: invalid */
    /* NOTE(review): in the P=0, W=0, U=1 fall-through, DEST is left
     * unset and TMP is not freed — matches historical behavior here;
     * verify against callers before changing. */
    return 0;
}
1655
/* Fetch the shift amount for an iwMMXt shift insn into DEST, ANDed
 * with MASK.  insn bit 8 selects a wCGRn control-register source
 * (only wCGR0..wCGR3 are valid); otherwise the low 32 bits of wRd
 * are used.  Returns 1 for an invalid register, 0 on success. */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1677
a1c7273b 1678/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1679 (ie. an undefined instruction). */
7dcc1f89 1680static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1681{
1682 int rd, wrd;
1683 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1684 TCGv_i32 addr;
1685 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1686
1687 if ((insn & 0x0e000e00) == 0x0c000000) {
1688 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1689 wrd = insn & 0xf;
1690 rdlo = (insn >> 12) & 0xf;
1691 rdhi = (insn >> 16) & 0xf;
1692 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1693 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1694 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1695 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1696 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1697 } else { /* TMCRR */
da6b5335
FN
1698 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1699 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1700 gen_op_iwmmxt_set_mup();
1701 }
1702 return 0;
1703 }
1704
1705 wrd = (insn >> 12) & 0xf;
7d1b0095 1706 addr = tcg_temp_new_i32();
da6b5335 1707 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1708 tcg_temp_free_i32(addr);
18c9b560 1709 return 1;
da6b5335 1710 }
18c9b560
AZ
1711 if (insn & ARM_CP_RW_BIT) {
1712 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1713 tmp = tcg_temp_new_i32();
12dcc321 1714 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1715 iwmmxt_store_creg(wrd, tmp);
18c9b560 1716 } else {
e677137d
PB
1717 i = 1;
1718 if (insn & (1 << 8)) {
1719 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1720 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1721 i = 0;
1722 } else { /* WLDRW wRd */
29531141 1723 tmp = tcg_temp_new_i32();
12dcc321 1724 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1725 }
1726 } else {
29531141 1727 tmp = tcg_temp_new_i32();
e677137d 1728 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1729 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1730 } else { /* WLDRB */
12dcc321 1731 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1732 }
1733 }
1734 if (i) {
1735 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1736 tcg_temp_free_i32(tmp);
e677137d 1737 }
18c9b560
AZ
1738 gen_op_iwmmxt_movq_wRn_M0(wrd);
1739 }
1740 } else {
1741 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1742 tmp = iwmmxt_load_creg(wrd);
12dcc321 1743 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1744 } else {
1745 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1746 tmp = tcg_temp_new_i32();
e677137d
PB
1747 if (insn & (1 << 8)) {
1748 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1749 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1750 } else { /* WSTRW wRd */
ecc7b3aa 1751 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1752 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1753 }
1754 } else {
1755 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1756 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1757 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1758 } else { /* WSTRB */
ecc7b3aa 1759 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1760 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1761 }
1762 }
18c9b560 1763 }
29531141 1764 tcg_temp_free_i32(tmp);
18c9b560 1765 }
7d1b0095 1766 tcg_temp_free_i32(addr);
18c9b560
AZ
1767 return 0;
1768 }
1769
1770 if ((insn & 0x0f000000) != 0x0e000000)
1771 return 1;
1772
1773 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1774 case 0x000: /* WOR */
1775 wrd = (insn >> 12) & 0xf;
1776 rd0 = (insn >> 0) & 0xf;
1777 rd1 = (insn >> 16) & 0xf;
1778 gen_op_iwmmxt_movq_M0_wRn(rd0);
1779 gen_op_iwmmxt_orq_M0_wRn(rd1);
1780 gen_op_iwmmxt_setpsr_nz();
1781 gen_op_iwmmxt_movq_wRn_M0(wrd);
1782 gen_op_iwmmxt_set_mup();
1783 gen_op_iwmmxt_set_cup();
1784 break;
1785 case 0x011: /* TMCR */
1786 if (insn & 0xf)
1787 return 1;
1788 rd = (insn >> 12) & 0xf;
1789 wrd = (insn >> 16) & 0xf;
1790 switch (wrd) {
1791 case ARM_IWMMXT_wCID:
1792 case ARM_IWMMXT_wCASF:
1793 break;
1794 case ARM_IWMMXT_wCon:
1795 gen_op_iwmmxt_set_cup();
1796 /* Fall through. */
1797 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1798 tmp = iwmmxt_load_creg(wrd);
1799 tmp2 = load_reg(s, rd);
f669df27 1800 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1801 tcg_temp_free_i32(tmp2);
da6b5335 1802 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1803 break;
1804 case ARM_IWMMXT_wCGR0:
1805 case ARM_IWMMXT_wCGR1:
1806 case ARM_IWMMXT_wCGR2:
1807 case ARM_IWMMXT_wCGR3:
1808 gen_op_iwmmxt_set_cup();
da6b5335
FN
1809 tmp = load_reg(s, rd);
1810 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1811 break;
1812 default:
1813 return 1;
1814 }
1815 break;
1816 case 0x100: /* WXOR */
1817 wrd = (insn >> 12) & 0xf;
1818 rd0 = (insn >> 0) & 0xf;
1819 rd1 = (insn >> 16) & 0xf;
1820 gen_op_iwmmxt_movq_M0_wRn(rd0);
1821 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1822 gen_op_iwmmxt_setpsr_nz();
1823 gen_op_iwmmxt_movq_wRn_M0(wrd);
1824 gen_op_iwmmxt_set_mup();
1825 gen_op_iwmmxt_set_cup();
1826 break;
1827 case 0x111: /* TMRC */
1828 if (insn & 0xf)
1829 return 1;
1830 rd = (insn >> 12) & 0xf;
1831 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1832 tmp = iwmmxt_load_creg(wrd);
1833 store_reg(s, rd, tmp);
18c9b560
AZ
1834 break;
1835 case 0x300: /* WANDN */
1836 wrd = (insn >> 12) & 0xf;
1837 rd0 = (insn >> 0) & 0xf;
1838 rd1 = (insn >> 16) & 0xf;
1839 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1840 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1841 gen_op_iwmmxt_andq_M0_wRn(rd1);
1842 gen_op_iwmmxt_setpsr_nz();
1843 gen_op_iwmmxt_movq_wRn_M0(wrd);
1844 gen_op_iwmmxt_set_mup();
1845 gen_op_iwmmxt_set_cup();
1846 break;
1847 case 0x200: /* WAND */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 0) & 0xf;
1850 rd1 = (insn >> 16) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
1852 gen_op_iwmmxt_andq_M0_wRn(rd1);
1853 gen_op_iwmmxt_setpsr_nz();
1854 gen_op_iwmmxt_movq_wRn_M0(wrd);
1855 gen_op_iwmmxt_set_mup();
1856 gen_op_iwmmxt_set_cup();
1857 break;
1858 case 0x810: case 0xa10: /* WMADD */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 0) & 0xf;
1861 rd1 = (insn >> 16) & 0xf;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 if (insn & (1 << 21))
1864 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1865 else
1866 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1867 gen_op_iwmmxt_movq_wRn_M0(wrd);
1868 gen_op_iwmmxt_set_mup();
1869 break;
1870 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1871 wrd = (insn >> 12) & 0xf;
1872 rd0 = (insn >> 16) & 0xf;
1873 rd1 = (insn >> 0) & 0xf;
1874 gen_op_iwmmxt_movq_M0_wRn(rd0);
1875 switch ((insn >> 22) & 3) {
1876 case 0:
1877 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1878 break;
1879 case 1:
1880 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1881 break;
1882 case 2:
1883 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1884 break;
1885 case 3:
1886 return 1;
1887 }
1888 gen_op_iwmmxt_movq_wRn_M0(wrd);
1889 gen_op_iwmmxt_set_mup();
1890 gen_op_iwmmxt_set_cup();
1891 break;
1892 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1893 wrd = (insn >> 12) & 0xf;
1894 rd0 = (insn >> 16) & 0xf;
1895 rd1 = (insn >> 0) & 0xf;
1896 gen_op_iwmmxt_movq_M0_wRn(rd0);
1897 switch ((insn >> 22) & 3) {
1898 case 0:
1899 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1900 break;
1901 case 1:
1902 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1903 break;
1904 case 2:
1905 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1906 break;
1907 case 3:
1908 return 1;
1909 }
1910 gen_op_iwmmxt_movq_wRn_M0(wrd);
1911 gen_op_iwmmxt_set_mup();
1912 gen_op_iwmmxt_set_cup();
1913 break;
1914 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1915 wrd = (insn >> 12) & 0xf;
1916 rd0 = (insn >> 16) & 0xf;
1917 rd1 = (insn >> 0) & 0xf;
1918 gen_op_iwmmxt_movq_M0_wRn(rd0);
1919 if (insn & (1 << 22))
1920 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1921 else
1922 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1923 if (!(insn & (1 << 20)))
1924 gen_op_iwmmxt_addl_M0_wRn(wrd);
1925 gen_op_iwmmxt_movq_wRn_M0(wrd);
1926 gen_op_iwmmxt_set_mup();
1927 break;
1928 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1929 wrd = (insn >> 12) & 0xf;
1930 rd0 = (insn >> 16) & 0xf;
1931 rd1 = (insn >> 0) & 0xf;
1932 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1933 if (insn & (1 << 21)) {
1934 if (insn & (1 << 20))
1935 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1936 else
1937 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1938 } else {
1939 if (insn & (1 << 20))
1940 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1941 else
1942 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1943 }
18c9b560
AZ
1944 gen_op_iwmmxt_movq_wRn_M0(wrd);
1945 gen_op_iwmmxt_set_mup();
1946 break;
1947 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1948 wrd = (insn >> 12) & 0xf;
1949 rd0 = (insn >> 16) & 0xf;
1950 rd1 = (insn >> 0) & 0xf;
1951 gen_op_iwmmxt_movq_M0_wRn(rd0);
1952 if (insn & (1 << 21))
1953 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1954 else
1955 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1956 if (!(insn & (1 << 20))) {
e677137d
PB
1957 iwmmxt_load_reg(cpu_V1, wrd);
1958 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1959 }
1960 gen_op_iwmmxt_movq_wRn_M0(wrd);
1961 gen_op_iwmmxt_set_mup();
1962 break;
1963 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1964 wrd = (insn >> 12) & 0xf;
1965 rd0 = (insn >> 16) & 0xf;
1966 rd1 = (insn >> 0) & 0xf;
1967 gen_op_iwmmxt_movq_M0_wRn(rd0);
1968 switch ((insn >> 22) & 3) {
1969 case 0:
1970 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1971 break;
1972 case 1:
1973 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1974 break;
1975 case 2:
1976 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1977 break;
1978 case 3:
1979 return 1;
1980 }
1981 gen_op_iwmmxt_movq_wRn_M0(wrd);
1982 gen_op_iwmmxt_set_mup();
1983 gen_op_iwmmxt_set_cup();
1984 break;
1985 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1986 wrd = (insn >> 12) & 0xf;
1987 rd0 = (insn >> 16) & 0xf;
1988 rd1 = (insn >> 0) & 0xf;
1989 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1990 if (insn & (1 << 22)) {
1991 if (insn & (1 << 20))
1992 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1993 else
1994 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1995 } else {
1996 if (insn & (1 << 20))
1997 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1998 else
1999 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2000 }
18c9b560
AZ
2001 gen_op_iwmmxt_movq_wRn_M0(wrd);
2002 gen_op_iwmmxt_set_mup();
2003 gen_op_iwmmxt_set_cup();
2004 break;
2005 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2006 wrd = (insn >> 12) & 0xf;
2007 rd0 = (insn >> 16) & 0xf;
2008 rd1 = (insn >> 0) & 0xf;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2010 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2011 tcg_gen_andi_i32(tmp, tmp, 7);
2012 iwmmxt_load_reg(cpu_V1, rd1);
2013 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2014 tcg_temp_free_i32(tmp);
18c9b560
AZ
2015 gen_op_iwmmxt_movq_wRn_M0(wrd);
2016 gen_op_iwmmxt_set_mup();
2017 break;
2018 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2019 if (((insn >> 6) & 3) == 3)
2020 return 1;
18c9b560
AZ
2021 rd = (insn >> 12) & 0xf;
2022 wrd = (insn >> 16) & 0xf;
da6b5335 2023 tmp = load_reg(s, rd);
18c9b560
AZ
2024 gen_op_iwmmxt_movq_M0_wRn(wrd);
2025 switch ((insn >> 6) & 3) {
2026 case 0:
da6b5335
FN
2027 tmp2 = tcg_const_i32(0xff);
2028 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2029 break;
2030 case 1:
da6b5335
FN
2031 tmp2 = tcg_const_i32(0xffff);
2032 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2033 break;
2034 case 2:
da6b5335
FN
2035 tmp2 = tcg_const_i32(0xffffffff);
2036 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2037 break;
da6b5335 2038 default:
39d5492a
PM
2039 TCGV_UNUSED_I32(tmp2);
2040 TCGV_UNUSED_I32(tmp3);
18c9b560 2041 }
da6b5335 2042 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2043 tcg_temp_free_i32(tmp3);
2044 tcg_temp_free_i32(tmp2);
7d1b0095 2045 tcg_temp_free_i32(tmp);
18c9b560
AZ
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 break;
2049 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2050 rd = (insn >> 12) & 0xf;
2051 wrd = (insn >> 16) & 0xf;
da6b5335 2052 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2053 return 1;
2054 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2055 tmp = tcg_temp_new_i32();
18c9b560
AZ
2056 switch ((insn >> 22) & 3) {
2057 case 0:
da6b5335 2058 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2059 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2060 if (insn & 8) {
2061 tcg_gen_ext8s_i32(tmp, tmp);
2062 } else {
2063 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2064 }
2065 break;
2066 case 1:
da6b5335 2067 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2068 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2069 if (insn & 8) {
2070 tcg_gen_ext16s_i32(tmp, tmp);
2071 } else {
2072 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2073 }
2074 break;
2075 case 2:
da6b5335 2076 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2077 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2078 break;
18c9b560 2079 }
da6b5335 2080 store_reg(s, rd, tmp);
18c9b560
AZ
2081 break;
2082 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2083 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2084 return 1;
da6b5335 2085 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2086 switch ((insn >> 22) & 3) {
2087 case 0:
da6b5335 2088 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2089 break;
2090 case 1:
da6b5335 2091 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2092 break;
2093 case 2:
da6b5335 2094 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2095 break;
18c9b560 2096 }
da6b5335
FN
2097 tcg_gen_shli_i32(tmp, tmp, 28);
2098 gen_set_nzcv(tmp);
7d1b0095 2099 tcg_temp_free_i32(tmp);
18c9b560
AZ
2100 break;
2101 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2102 if (((insn >> 6) & 3) == 3)
2103 return 1;
18c9b560
AZ
2104 rd = (insn >> 12) & 0xf;
2105 wrd = (insn >> 16) & 0xf;
da6b5335 2106 tmp = load_reg(s, rd);
18c9b560
AZ
2107 switch ((insn >> 6) & 3) {
2108 case 0:
da6b5335 2109 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2110 break;
2111 case 1:
da6b5335 2112 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2113 break;
2114 case 2:
da6b5335 2115 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2116 break;
18c9b560 2117 }
7d1b0095 2118 tcg_temp_free_i32(tmp);
18c9b560
AZ
2119 gen_op_iwmmxt_movq_wRn_M0(wrd);
2120 gen_op_iwmmxt_set_mup();
2121 break;
2122 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2123 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2124 return 1;
da6b5335 2125 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2126 tmp2 = tcg_temp_new_i32();
da6b5335 2127 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 for (i = 0; i < 7; i ++) {
da6b5335
FN
2131 tcg_gen_shli_i32(tmp2, tmp2, 4);
2132 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2133 }
2134 break;
2135 case 1:
2136 for (i = 0; i < 3; i ++) {
da6b5335
FN
2137 tcg_gen_shli_i32(tmp2, tmp2, 8);
2138 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2139 }
2140 break;
2141 case 2:
da6b5335
FN
2142 tcg_gen_shli_i32(tmp2, tmp2, 16);
2143 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2144 break;
18c9b560 2145 }
da6b5335 2146 gen_set_nzcv(tmp);
7d1b0095
PM
2147 tcg_temp_free_i32(tmp2);
2148 tcg_temp_free_i32(tmp);
18c9b560
AZ
2149 break;
2150 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2151 wrd = (insn >> 12) & 0xf;
2152 rd0 = (insn >> 16) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 switch ((insn >> 22) & 3) {
2155 case 0:
e677137d 2156 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2157 break;
2158 case 1:
e677137d 2159 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2160 break;
2161 case 2:
e677137d 2162 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2163 break;
2164 case 3:
2165 return 1;
2166 }
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 break;
2170 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2171 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2172 return 1;
da6b5335 2173 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2174 tmp2 = tcg_temp_new_i32();
da6b5335 2175 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 for (i = 0; i < 7; i ++) {
da6b5335
FN
2179 tcg_gen_shli_i32(tmp2, tmp2, 4);
2180 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2181 }
2182 break;
2183 case 1:
2184 for (i = 0; i < 3; i ++) {
da6b5335
FN
2185 tcg_gen_shli_i32(tmp2, tmp2, 8);
2186 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2187 }
2188 break;
2189 case 2:
da6b5335
FN
2190 tcg_gen_shli_i32(tmp2, tmp2, 16);
2191 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2192 break;
18c9b560 2193 }
da6b5335 2194 gen_set_nzcv(tmp);
7d1b0095
PM
2195 tcg_temp_free_i32(tmp2);
2196 tcg_temp_free_i32(tmp);
18c9b560
AZ
2197 break;
2198 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2199 rd = (insn >> 12) & 0xf;
2200 rd0 = (insn >> 16) & 0xf;
da6b5335 2201 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2202 return 1;
2203 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2204 tmp = tcg_temp_new_i32();
18c9b560
AZ
2205 switch ((insn >> 22) & 3) {
2206 case 0:
da6b5335 2207 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2208 break;
2209 case 1:
da6b5335 2210 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2211 break;
2212 case 2:
da6b5335 2213 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2214 break;
18c9b560 2215 }
da6b5335 2216 store_reg(s, rd, tmp);
18c9b560
AZ
2217 break;
2218 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2219 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2220 wrd = (insn >> 12) & 0xf;
2221 rd0 = (insn >> 16) & 0xf;
2222 rd1 = (insn >> 0) & 0xf;
2223 gen_op_iwmmxt_movq_M0_wRn(rd0);
2224 switch ((insn >> 22) & 3) {
2225 case 0:
2226 if (insn & (1 << 21))
2227 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2228 else
2229 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2230 break;
2231 case 1:
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2234 else
2235 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2236 break;
2237 case 2:
2238 if (insn & (1 << 21))
2239 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2240 else
2241 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2242 break;
2243 case 3:
2244 return 1;
2245 }
2246 gen_op_iwmmxt_movq_wRn_M0(wrd);
2247 gen_op_iwmmxt_set_mup();
2248 gen_op_iwmmxt_set_cup();
2249 break;
2250 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2251 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2252 wrd = (insn >> 12) & 0xf;
2253 rd0 = (insn >> 16) & 0xf;
2254 gen_op_iwmmxt_movq_M0_wRn(rd0);
2255 switch ((insn >> 22) & 3) {
2256 case 0:
2257 if (insn & (1 << 21))
2258 gen_op_iwmmxt_unpacklsb_M0();
2259 else
2260 gen_op_iwmmxt_unpacklub_M0();
2261 break;
2262 case 1:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_unpacklsw_M0();
2265 else
2266 gen_op_iwmmxt_unpackluw_M0();
2267 break;
2268 case 2:
2269 if (insn & (1 << 21))
2270 gen_op_iwmmxt_unpacklsl_M0();
2271 else
2272 gen_op_iwmmxt_unpacklul_M0();
2273 break;
2274 case 3:
2275 return 1;
2276 }
2277 gen_op_iwmmxt_movq_wRn_M0(wrd);
2278 gen_op_iwmmxt_set_mup();
2279 gen_op_iwmmxt_set_cup();
2280 break;
2281 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2282 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2283 wrd = (insn >> 12) & 0xf;
2284 rd0 = (insn >> 16) & 0xf;
2285 gen_op_iwmmxt_movq_M0_wRn(rd0);
2286 switch ((insn >> 22) & 3) {
2287 case 0:
2288 if (insn & (1 << 21))
2289 gen_op_iwmmxt_unpackhsb_M0();
2290 else
2291 gen_op_iwmmxt_unpackhub_M0();
2292 break;
2293 case 1:
2294 if (insn & (1 << 21))
2295 gen_op_iwmmxt_unpackhsw_M0();
2296 else
2297 gen_op_iwmmxt_unpackhuw_M0();
2298 break;
2299 case 2:
2300 if (insn & (1 << 21))
2301 gen_op_iwmmxt_unpackhsl_M0();
2302 else
2303 gen_op_iwmmxt_unpackhul_M0();
2304 break;
2305 case 3:
2306 return 1;
2307 }
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 gen_op_iwmmxt_set_cup();
2311 break;
2312 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2313 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2314 if (((insn >> 22) & 3) == 0)
2315 return 1;
18c9b560
AZ
2316 wrd = (insn >> 12) & 0xf;
2317 rd0 = (insn >> 16) & 0xf;
2318 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2319 tmp = tcg_temp_new_i32();
da6b5335 2320 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2321 tcg_temp_free_i32(tmp);
18c9b560 2322 return 1;
da6b5335 2323 }
18c9b560 2324 switch ((insn >> 22) & 3) {
18c9b560 2325 case 1:
477955bd 2326 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2327 break;
2328 case 2:
477955bd 2329 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2330 break;
2331 case 3:
477955bd 2332 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2333 break;
2334 }
7d1b0095 2335 tcg_temp_free_i32(tmp);
18c9b560
AZ
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
2339 break;
2340 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2341 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2342 if (((insn >> 22) & 3) == 0)
2343 return 1;
18c9b560
AZ
2344 wrd = (insn >> 12) & 0xf;
2345 rd0 = (insn >> 16) & 0xf;
2346 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2347 tmp = tcg_temp_new_i32();
da6b5335 2348 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2349 tcg_temp_free_i32(tmp);
18c9b560 2350 return 1;
da6b5335 2351 }
18c9b560 2352 switch ((insn >> 22) & 3) {
18c9b560 2353 case 1:
477955bd 2354 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2355 break;
2356 case 2:
477955bd 2357 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2358 break;
2359 case 3:
477955bd 2360 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2361 break;
2362 }
7d1b0095 2363 tcg_temp_free_i32(tmp);
18c9b560
AZ
2364 gen_op_iwmmxt_movq_wRn_M0(wrd);
2365 gen_op_iwmmxt_set_mup();
2366 gen_op_iwmmxt_set_cup();
2367 break;
2368 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2369 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2370 if (((insn >> 22) & 3) == 0)
2371 return 1;
18c9b560
AZ
2372 wrd = (insn >> 12) & 0xf;
2373 rd0 = (insn >> 16) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2375 tmp = tcg_temp_new_i32();
da6b5335 2376 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2377 tcg_temp_free_i32(tmp);
18c9b560 2378 return 1;
da6b5335 2379 }
18c9b560 2380 switch ((insn >> 22) & 3) {
18c9b560 2381 case 1:
477955bd 2382 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2383 break;
2384 case 2:
477955bd 2385 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2386 break;
2387 case 3:
477955bd 2388 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2389 break;
2390 }
7d1b0095 2391 tcg_temp_free_i32(tmp);
18c9b560
AZ
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 gen_op_iwmmxt_set_cup();
2395 break;
2396 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2397 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2398 if (((insn >> 22) & 3) == 0)
2399 return 1;
18c9b560
AZ
2400 wrd = (insn >> 12) & 0xf;
2401 rd0 = (insn >> 16) & 0xf;
2402 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2403 tmp = tcg_temp_new_i32();
18c9b560 2404 switch ((insn >> 22) & 3) {
18c9b560 2405 case 1:
da6b5335 2406 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2407 tcg_temp_free_i32(tmp);
18c9b560 2408 return 1;
da6b5335 2409 }
477955bd 2410 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2411 break;
2412 case 2:
da6b5335 2413 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2414 tcg_temp_free_i32(tmp);
18c9b560 2415 return 1;
da6b5335 2416 }
477955bd 2417 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2418 break;
2419 case 3:
da6b5335 2420 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2421 tcg_temp_free_i32(tmp);
18c9b560 2422 return 1;
da6b5335 2423 }
477955bd 2424 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2425 break;
2426 }
7d1b0095 2427 tcg_temp_free_i32(tmp);
18c9b560
AZ
2428 gen_op_iwmmxt_movq_wRn_M0(wrd);
2429 gen_op_iwmmxt_set_mup();
2430 gen_op_iwmmxt_set_cup();
2431 break;
2432 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2433 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2434 wrd = (insn >> 12) & 0xf;
2435 rd0 = (insn >> 16) & 0xf;
2436 rd1 = (insn >> 0) & 0xf;
2437 gen_op_iwmmxt_movq_M0_wRn(rd0);
2438 switch ((insn >> 22) & 3) {
2439 case 0:
2440 if (insn & (1 << 21))
2441 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2442 else
2443 gen_op_iwmmxt_minub_M0_wRn(rd1);
2444 break;
2445 case 1:
2446 if (insn & (1 << 21))
2447 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2448 else
2449 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2450 break;
2451 case 2:
2452 if (insn & (1 << 21))
2453 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2454 else
2455 gen_op_iwmmxt_minul_M0_wRn(rd1);
2456 break;
2457 case 3:
2458 return 1;
2459 }
2460 gen_op_iwmmxt_movq_wRn_M0(wrd);
2461 gen_op_iwmmxt_set_mup();
2462 break;
2463 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2464 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2465 wrd = (insn >> 12) & 0xf;
2466 rd0 = (insn >> 16) & 0xf;
2467 rd1 = (insn >> 0) & 0xf;
2468 gen_op_iwmmxt_movq_M0_wRn(rd0);
2469 switch ((insn >> 22) & 3) {
2470 case 0:
2471 if (insn & (1 << 21))
2472 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2473 else
2474 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2475 break;
2476 case 1:
2477 if (insn & (1 << 21))
2478 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2479 else
2480 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2481 break;
2482 case 2:
2483 if (insn & (1 << 21))
2484 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2485 else
2486 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2487 break;
2488 case 3:
2489 return 1;
2490 }
2491 gen_op_iwmmxt_movq_wRn_M0(wrd);
2492 gen_op_iwmmxt_set_mup();
2493 break;
2494 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2495 case 0x402: case 0x502: case 0x602: case 0x702:
2496 wrd = (insn >> 12) & 0xf;
2497 rd0 = (insn >> 16) & 0xf;
2498 rd1 = (insn >> 0) & 0xf;
2499 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2500 tmp = tcg_const_i32((insn >> 20) & 3);
2501 iwmmxt_load_reg(cpu_V1, rd1);
2502 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2503 tcg_temp_free_i32(tmp);
18c9b560
AZ
2504 gen_op_iwmmxt_movq_wRn_M0(wrd);
2505 gen_op_iwmmxt_set_mup();
2506 break;
2507 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2508 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2509 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2510 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2511 wrd = (insn >> 12) & 0xf;
2512 rd0 = (insn >> 16) & 0xf;
2513 rd1 = (insn >> 0) & 0xf;
2514 gen_op_iwmmxt_movq_M0_wRn(rd0);
2515 switch ((insn >> 20) & 0xf) {
2516 case 0x0:
2517 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2518 break;
2519 case 0x1:
2520 gen_op_iwmmxt_subub_M0_wRn(rd1);
2521 break;
2522 case 0x3:
2523 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2524 break;
2525 case 0x4:
2526 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2527 break;
2528 case 0x5:
2529 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2530 break;
2531 case 0x7:
2532 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2533 break;
2534 case 0x8:
2535 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2536 break;
2537 case 0x9:
2538 gen_op_iwmmxt_subul_M0_wRn(rd1);
2539 break;
2540 case 0xb:
2541 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2542 break;
2543 default:
2544 return 1;
2545 }
2546 gen_op_iwmmxt_movq_wRn_M0(wrd);
2547 gen_op_iwmmxt_set_mup();
2548 gen_op_iwmmxt_set_cup();
2549 break;
2550 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2551 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2552 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2553 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2554 wrd = (insn >> 12) & 0xf;
2555 rd0 = (insn >> 16) & 0xf;
2556 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2557 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2558 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2559 tcg_temp_free_i32(tmp);
18c9b560
AZ
2560 gen_op_iwmmxt_movq_wRn_M0(wrd);
2561 gen_op_iwmmxt_set_mup();
2562 gen_op_iwmmxt_set_cup();
2563 break;
2564 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2565 case 0x418: case 0x518: case 0x618: case 0x718:
2566 case 0x818: case 0x918: case 0xa18: case 0xb18:
2567 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2568 wrd = (insn >> 12) & 0xf;
2569 rd0 = (insn >> 16) & 0xf;
2570 rd1 = (insn >> 0) & 0xf;
2571 gen_op_iwmmxt_movq_M0_wRn(rd0);
2572 switch ((insn >> 20) & 0xf) {
2573 case 0x0:
2574 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2575 break;
2576 case 0x1:
2577 gen_op_iwmmxt_addub_M0_wRn(rd1);
2578 break;
2579 case 0x3:
2580 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2581 break;
2582 case 0x4:
2583 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2584 break;
2585 case 0x5:
2586 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2587 break;
2588 case 0x7:
2589 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2590 break;
2591 case 0x8:
2592 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2593 break;
2594 case 0x9:
2595 gen_op_iwmmxt_addul_M0_wRn(rd1);
2596 break;
2597 case 0xb:
2598 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2599 break;
2600 default:
2601 return 1;
2602 }
2603 gen_op_iwmmxt_movq_wRn_M0(wrd);
2604 gen_op_iwmmxt_set_mup();
2605 gen_op_iwmmxt_set_cup();
2606 break;
2607 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2608 case 0x408: case 0x508: case 0x608: case 0x708:
2609 case 0x808: case 0x908: case 0xa08: case 0xb08:
2610 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2611 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2612 return 1;
18c9b560
AZ
2613 wrd = (insn >> 12) & 0xf;
2614 rd0 = (insn >> 16) & 0xf;
2615 rd1 = (insn >> 0) & 0xf;
2616 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2617 switch ((insn >> 22) & 3) {
18c9b560
AZ
2618 case 1:
2619 if (insn & (1 << 21))
2620 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2621 else
2622 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2623 break;
2624 case 2:
2625 if (insn & (1 << 21))
2626 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2627 else
2628 gen_op_iwmmxt_packul_M0_wRn(rd1);
2629 break;
2630 case 3:
2631 if (insn & (1 << 21))
2632 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2633 else
2634 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2635 break;
2636 }
2637 gen_op_iwmmxt_movq_wRn_M0(wrd);
2638 gen_op_iwmmxt_set_mup();
2639 gen_op_iwmmxt_set_cup();
2640 break;
2641 case 0x201: case 0x203: case 0x205: case 0x207:
2642 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2643 case 0x211: case 0x213: case 0x215: case 0x217:
2644 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2645 wrd = (insn >> 5) & 0xf;
2646 rd0 = (insn >> 12) & 0xf;
2647 rd1 = (insn >> 0) & 0xf;
2648 if (rd0 == 0xf || rd1 == 0xf)
2649 return 1;
2650 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2651 tmp = load_reg(s, rd0);
2652 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2653 switch ((insn >> 16) & 0xf) {
2654 case 0x0: /* TMIA */
da6b5335 2655 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2656 break;
2657 case 0x8: /* TMIAPH */
da6b5335 2658 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2659 break;
2660 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2661 if (insn & (1 << 16))
da6b5335 2662 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2663 if (insn & (1 << 17))
da6b5335
FN
2664 tcg_gen_shri_i32(tmp2, tmp2, 16);
2665 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2666 break;
2667 default:
7d1b0095
PM
2668 tcg_temp_free_i32(tmp2);
2669 tcg_temp_free_i32(tmp);
18c9b560
AZ
2670 return 1;
2671 }
7d1b0095
PM
2672 tcg_temp_free_i32(tmp2);
2673 tcg_temp_free_i32(tmp);
18c9b560
AZ
2674 gen_op_iwmmxt_movq_wRn_M0(wrd);
2675 gen_op_iwmmxt_set_mup();
2676 break;
2677 default:
2678 return 1;
2679 }
2680
2681 return 0;
2682}
2683
a1c7273b 2684/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2685 (ie. an undefined instruction). */
7dcc1f89 2686static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2687{
2688 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2689 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2690
2691 if ((insn & 0x0ff00f10) == 0x0e200010) {
2692 /* Multiply with Internal Accumulate Format */
2693 rd0 = (insn >> 12) & 0xf;
2694 rd1 = insn & 0xf;
2695 acc = (insn >> 5) & 7;
2696
2697 if (acc != 0)
2698 return 1;
2699
3a554c0f
FN
2700 tmp = load_reg(s, rd0);
2701 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2702 switch ((insn >> 16) & 0xf) {
2703 case 0x0: /* MIA */
3a554c0f 2704 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2705 break;
2706 case 0x8: /* MIAPH */
3a554c0f 2707 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2708 break;
2709 case 0xc: /* MIABB */
2710 case 0xd: /* MIABT */
2711 case 0xe: /* MIATB */
2712 case 0xf: /* MIATT */
18c9b560 2713 if (insn & (1 << 16))
3a554c0f 2714 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2715 if (insn & (1 << 17))
3a554c0f
FN
2716 tcg_gen_shri_i32(tmp2, tmp2, 16);
2717 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2718 break;
2719 default:
2720 return 1;
2721 }
7d1b0095
PM
2722 tcg_temp_free_i32(tmp2);
2723 tcg_temp_free_i32(tmp);
18c9b560
AZ
2724
2725 gen_op_iwmmxt_movq_wRn_M0(acc);
2726 return 0;
2727 }
2728
2729 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2730 /* Internal Accumulator Access Format */
2731 rdhi = (insn >> 16) & 0xf;
2732 rdlo = (insn >> 12) & 0xf;
2733 acc = insn & 7;
2734
2735 if (acc != 0)
2736 return 1;
2737
2738 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2739 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2740 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2741 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2742 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2743 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2744 } else { /* MAR */
3a554c0f
FN
2745 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2746 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2747 }
2748 return 0;
2749 }
2750
2751 return 1;
2752}
2753
/* Shift right by n when n is positive, otherwise shift left by -n.
   Lets VFP_SREG use one expression whether the field sits above or
   below its destination bit position. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: the 4-bit field at
   'bigbit' forms bits [4:1] and the bit at 'smallbit' forms bit 0. */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into 'reg'.  VFP3 and
   later have 32 D registers, with 'smallbit' supplying bit 4; on
   pre-VFP3 cores only D0-D15 exist, so a set 'smallbit' makes the
   insn UNDEF (note: expands to 'return 1' from the enclosing
   decode function, which must have a DisasContext *s in scope). */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Per-operand accessors: D (destination), N and M source fields. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2773
4373f3ce 2774/* Move between integer and VFP cores. */
39d5492a 2775static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2776{
39d5492a 2777 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2778 tcg_gen_mov_i32(tmp, cpu_F0s);
2779 return tmp;
2780}
2781
39d5492a 2782static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2783{
2784 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2785 tcg_temp_free_i32(tmp);
4373f3ce
PB
2786}
2787
39d5492a 2788static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2789{
39d5492a 2790 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2791 if (shift)
2792 tcg_gen_shri_i32(var, var, shift);
86831435 2793 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2794 tcg_gen_shli_i32(tmp, var, 8);
2795 tcg_gen_or_i32(var, var, tmp);
2796 tcg_gen_shli_i32(tmp, var, 16);
2797 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2798 tcg_temp_free_i32(tmp);
ad69471c
PB
2799}
2800
39d5492a 2801static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2802{
39d5492a 2803 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2804 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2805 tcg_gen_shli_i32(tmp, var, 16);
2806 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2807 tcg_temp_free_i32(tmp);
ad69471c
PB
2808}
2809
39d5492a 2810static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2811{
39d5492a 2812 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2813 tcg_gen_andi_i32(var, var, 0xffff0000);
2814 tcg_gen_shri_i32(tmp, var, 16);
2815 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2816 tcg_temp_free_i32(tmp);
ad69471c
PB
2817}
2818
39d5492a 2819static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2820{
2821 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2822 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2823 switch (size) {
2824 case 0:
12dcc321 2825 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2826 gen_neon_dup_u8(tmp, 0);
2827 break;
2828 case 1:
12dcc321 2829 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2830 gen_neon_dup_low16(tmp);
2831 break;
2832 case 2:
12dcc321 2833 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2834 break;
2835 default: /* Avoid compiler warnings. */
2836 abort();
2837 }
2838 return tmp;
2839}
2840
/* Emit code for the VFP VSEL instruction: conditionally select
 * register rn or rm into rd based on the condition code field of
 * 'insn' (bits [21:20]) and the cached NZCV flag values.  'dp' is
 * nonzero for double precision.  Always returns 0 (the insn cannot
 * UNDEF once decoded this far).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the cached flags to 64 bits so they can feed 64-bit
         * movcond ops.  ZF is compared against zero so a zero-extend
         * suffices; NF and VF are tested via their sign bit (LT/GE),
         * so they must be sign-extended.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two-step select: first pick on !Z, then overwrite with
             * frm again unless N == V.  Note the second movcond reuses
             * dest as an input, so the order of these ops matters.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: the 32-bit flag registers can be used
         * directly, no widening required.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2949
40cfacdd
WN
2950static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2951 uint32_t rm, uint32_t dp)
2952{
2953 uint32_t vmin = extract32(insn, 6, 1);
2954 TCGv_ptr fpst = get_fpstatus_ptr(0);
2955
2956 if (dp) {
2957 TCGv_i64 frn, frm, dest;
2958
2959 frn = tcg_temp_new_i64();
2960 frm = tcg_temp_new_i64();
2961 dest = tcg_temp_new_i64();
2962
2963 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2964 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2965 if (vmin) {
f71a2ae5 2966 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2967 } else {
f71a2ae5 2968 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2969 }
2970 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2971 tcg_temp_free_i64(frn);
2972 tcg_temp_free_i64(frm);
2973 tcg_temp_free_i64(dest);
2974 } else {
2975 TCGv_i32 frn, frm, dest;
2976
2977 frn = tcg_temp_new_i32();
2978 frm = tcg_temp_new_i32();
2979 dest = tcg_temp_new_i32();
2980
2981 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2982 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2983 if (vmin) {
f71a2ae5 2984 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2985 } else {
f71a2ae5 2986 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2987 }
2988 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2989 tcg_temp_free_i32(frn);
2990 tcg_temp_free_i32(frm);
2991 tcg_temp_free_i32(dest);
2992 }
2993
2994 tcg_temp_free_ptr(fpst);
2995 return 0;
2996}
2997
7655f39b
WN
2998static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2999 int rounding)
3000{
3001 TCGv_ptr fpst = get_fpstatus_ptr(0);
3002 TCGv_i32 tcg_rmode;
3003
3004 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3005 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3006
3007 if (dp) {
3008 TCGv_i64 tcg_op;
3009 TCGv_i64 tcg_res;
3010 tcg_op = tcg_temp_new_i64();
3011 tcg_res = tcg_temp_new_i64();
3012 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3013 gen_helper_rintd(tcg_res, tcg_op, fpst);
3014 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3015 tcg_temp_free_i64(tcg_op);
3016 tcg_temp_free_i64(tcg_res);
3017 } else {
3018 TCGv_i32 tcg_op;
3019 TCGv_i32 tcg_res;
3020 tcg_op = tcg_temp_new_i32();
3021 tcg_res = tcg_temp_new_i32();
3022 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3023 gen_helper_rints(tcg_res, tcg_op, fpst);
3024 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3025 tcg_temp_free_i32(tcg_op);
3026 tcg_temp_free_i32(tcg_res);
3027 }
3028
3029 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3030 tcg_temp_free_i32(tcg_rmode);
3031
3032 tcg_temp_free_ptr(fpst);
3033 return 0;
3034}
3035
c9975a83
WN
3036static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3037 int rounding)
3038{
3039 bool is_signed = extract32(insn, 7, 1);
3040 TCGv_ptr fpst = get_fpstatus_ptr(0);
3041 TCGv_i32 tcg_rmode, tcg_shift;
3042
3043 tcg_shift = tcg_const_i32(0);
3044
3045 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3046 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3047
3048 if (dp) {
3049 TCGv_i64 tcg_double, tcg_res;
3050 TCGv_i32 tcg_tmp;
3051 /* Rd is encoded as a single precision register even when the source
3052 * is double precision.
3053 */
3054 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3055 tcg_double = tcg_temp_new_i64();
3056 tcg_res = tcg_temp_new_i64();
3057 tcg_tmp = tcg_temp_new_i32();
3058 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3059 if (is_signed) {
3060 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3061 } else {
3062 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3063 }
ecc7b3aa 3064 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3065 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3066 tcg_temp_free_i32(tcg_tmp);
3067 tcg_temp_free_i64(tcg_res);
3068 tcg_temp_free_i64(tcg_double);
3069 } else {
3070 TCGv_i32 tcg_single, tcg_res;
3071 tcg_single = tcg_temp_new_i32();
3072 tcg_res = tcg_temp_new_i32();
3073 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3074 if (is_signed) {
3075 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3076 } else {
3077 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3078 }
3079 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3080 tcg_temp_free_i32(tcg_res);
3081 tcg_temp_free_i32(tcg_single);
3082 }
3083
3084 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3085 tcg_temp_free_i32(tcg_rmode);
3086
3087 tcg_temp_free_i32(tcg_shift);
3088
3089 tcg_temp_free_ptr(fpst);
3090
3091 return 0;
3092}
7655f39b
WN
3093
3094/* Table for converting the most common AArch32 encoding of
3095 * rounding mode to arm_fprounding order (which matches the
3096 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3097 */
3098static const uint8_t fp_decode_rm[] = {
3099 FPROUNDING_TIEAWAY,
3100 FPROUNDING_TIEEVEN,
3101 FPROUNDING_POSINF,
3102 FPROUNDING_NEGINF,
3103};
3104
7dcc1f89 3105static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3106{
3107 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3108
d614a513 3109 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3110 return 1;
3111 }
3112
3113 if (dp) {
3114 VFP_DREG_D(rd, insn);
3115 VFP_DREG_N(rn, insn);
3116 VFP_DREG_M(rm, insn);
3117 } else {
3118 rd = VFP_SREG_D(insn);
3119 rn = VFP_SREG_N(insn);
3120 rm = VFP_SREG_M(insn);
3121 }
3122
3123 if ((insn & 0x0f800e50) == 0x0e000a00) {
3124 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3125 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3126 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3127 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3128 /* VRINTA, VRINTN, VRINTP, VRINTM */
3129 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3130 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3131 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3132 /* VCVTA, VCVTN, VCVTP, VCVTM */
3133 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3134 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3135 }
3136 return 1;
3137}
3138
a1c7273b 3139/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3140 (ie. an undefined instruction). */
7dcc1f89 3141static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3142{
3143 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3144 int dp, veclen;
39d5492a
PM
3145 TCGv_i32 addr;
3146 TCGv_i32 tmp;
3147 TCGv_i32 tmp2;
b7bcbe95 3148
d614a513 3149 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3150 return 1;
d614a513 3151 }
40f137e1 3152
2c7ffc41
PM
3153 /* FIXME: this access check should not take precedence over UNDEF
3154 * for invalid encodings; we will generate incorrect syndrome information
3155 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3156 */
9dbbc748 3157 if (s->fp_excp_el) {
2c7ffc41 3158 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3159 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3160 return 0;
3161 }
3162
5df8bac1 3163 if (!s->vfp_enabled) {
9ee6e8bb 3164 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3165 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3166 return 1;
3167 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3168 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3169 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3170 return 1;
a50c0f51 3171 }
40f137e1 3172 }
6a57f3eb
WN
3173
3174 if (extract32(insn, 28, 4) == 0xf) {
3175 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3176 * only used in v8 and above.
3177 */
7dcc1f89 3178 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3179 }
3180
b7bcbe95
FB
3181 dp = ((insn & 0xf00) == 0xb00);
3182 switch ((insn >> 24) & 0xf) {
3183 case 0xe:
3184 if (insn & (1 << 4)) {
3185 /* single register transfer */
b7bcbe95
FB
3186 rd = (insn >> 12) & 0xf;
3187 if (dp) {
9ee6e8bb
PB
3188 int size;
3189 int pass;
3190
3191 VFP_DREG_N(rn, insn);
3192 if (insn & 0xf)
b7bcbe95 3193 return 1;
9ee6e8bb 3194 if (insn & 0x00c00060
d614a513 3195 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3196 return 1;
d614a513 3197 }
9ee6e8bb
PB
3198
3199 pass = (insn >> 21) & 1;
3200 if (insn & (1 << 22)) {
3201 size = 0;
3202 offset = ((insn >> 5) & 3) * 8;
3203 } else if (insn & (1 << 5)) {
3204 size = 1;
3205 offset = (insn & (1 << 6)) ? 16 : 0;
3206 } else {
3207 size = 2;
3208 offset = 0;
3209 }
18c9b560 3210 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3211 /* vfp->arm */
ad69471c 3212 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3213 switch (size) {
3214 case 0:
9ee6e8bb 3215 if (offset)
ad69471c 3216 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3217 if (insn & (1 << 23))
ad69471c 3218 gen_uxtb(tmp);
9ee6e8bb 3219 else
ad69471c 3220 gen_sxtb(tmp);
9ee6e8bb
PB
3221 break;
3222 case 1:
9ee6e8bb
PB
3223 if (insn & (1 << 23)) {
3224 if (offset) {
ad69471c 3225 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3226 } else {
ad69471c 3227 gen_uxth(tmp);
9ee6e8bb
PB
3228 }
3229 } else {
3230 if (offset) {
ad69471c 3231 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3232 } else {
ad69471c 3233 gen_sxth(tmp);
9ee6e8bb
PB
3234 }
3235 }
3236 break;
3237 case 2:
9ee6e8bb
PB
3238 break;
3239 }
ad69471c 3240 store_reg(s, rd, tmp);
b7bcbe95
FB
3241 } else {
3242 /* arm->vfp */
ad69471c 3243 tmp = load_reg(s, rd);
9ee6e8bb
PB
3244 if (insn & (1 << 23)) {
3245 /* VDUP */
3246 if (size == 0) {
ad69471c 3247 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3248 } else if (size == 1) {
ad69471c 3249 gen_neon_dup_low16(tmp);
9ee6e8bb 3250 }
cbbccffc 3251 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3252 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3253 tcg_gen_mov_i32(tmp2, tmp);
3254 neon_store_reg(rn, n, tmp2);
3255 }
3256 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3257 } else {
3258 /* VMOV */
3259 switch (size) {
3260 case 0:
ad69471c 3261 tmp2 = neon_load_reg(rn, pass);
d593c48e 3262 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3263 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3264 break;
3265 case 1:
ad69471c 3266 tmp2 = neon_load_reg(rn, pass);
d593c48e 3267 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3268 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3269 break;
3270 case 2:
9ee6e8bb
PB
3271 break;
3272 }
ad69471c 3273 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3274 }
b7bcbe95 3275 }
9ee6e8bb
PB
3276 } else { /* !dp */
3277 if ((insn & 0x6f) != 0x00)
3278 return 1;
3279 rn = VFP_SREG_N(insn);
18c9b560 3280 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3281 /* vfp->arm */
3282 if (insn & (1 << 21)) {
3283 /* system register */
40f137e1 3284 rn >>= 1;
9ee6e8bb 3285
b7bcbe95 3286 switch (rn) {
40f137e1 3287 case ARM_VFP_FPSID:
4373f3ce 3288 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3289 VFP3 restricts all id registers to privileged
3290 accesses. */
3291 if (IS_USER(s)
d614a513 3292 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3293 return 1;
d614a513 3294 }
4373f3ce 3295 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3296 break;
40f137e1 3297 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3298 if (IS_USER(s))
3299 return 1;
4373f3ce 3300 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3301 break;
40f137e1
PB
3302 case ARM_VFP_FPINST:
3303 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3304 /* Not present in VFP3. */
3305 if (IS_USER(s)
d614a513 3306 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3307 return 1;
d614a513 3308 }
4373f3ce 3309 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3310 break;
40f137e1 3311 case ARM_VFP_FPSCR:
601d70b9 3312 if (rd == 15) {
4373f3ce
PB
3313 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3314 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3315 } else {
7d1b0095 3316 tmp = tcg_temp_new_i32();
4373f3ce
PB
3317 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3318 }
b7bcbe95 3319 break;
a50c0f51 3320 case ARM_VFP_MVFR2:
d614a513 3321 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3322 return 1;
3323 }
3324 /* fall through */
9ee6e8bb
PB
3325 case ARM_VFP_MVFR0:
3326 case ARM_VFP_MVFR1:
3327 if (IS_USER(s)
d614a513 3328 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3329 return 1;
d614a513 3330 }
4373f3ce 3331 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3332 break;
b7bcbe95
FB
3333 default:
3334 return 1;
3335 }
3336 } else {
3337 gen_mov_F0_vreg(0, rn);
4373f3ce 3338 tmp = gen_vfp_mrs();
b7bcbe95
FB
3339 }
3340 if (rd == 15) {
b5ff1b31 3341 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3342 gen_set_nzcv(tmp);
7d1b0095 3343 tcg_temp_free_i32(tmp);
4373f3ce
PB
3344 } else {
3345 store_reg(s, rd, tmp);
3346 }
b7bcbe95
FB
3347 } else {
3348 /* arm->vfp */
b7bcbe95 3349 if (insn & (1 << 21)) {
40f137e1 3350 rn >>= 1;
b7bcbe95
FB
3351 /* system register */
3352 switch (rn) {
40f137e1 3353 case ARM_VFP_FPSID:
9ee6e8bb
PB
3354 case ARM_VFP_MVFR0:
3355 case ARM_VFP_MVFR1:
b7bcbe95
FB
3356 /* Writes are ignored. */
3357 break;
40f137e1 3358 case ARM_VFP_FPSCR:
e4c1cfa5 3359 tmp = load_reg(s, rd);
4373f3ce 3360 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3361 tcg_temp_free_i32(tmp);
b5ff1b31 3362 gen_lookup_tb(s);
b7bcbe95 3363 break;
40f137e1 3364 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3365 if (IS_USER(s))
3366 return 1;
71b3c3de
JR
3367 /* TODO: VFP subarchitecture support.
3368 * For now, keep the EN bit only */
e4c1cfa5 3369 tmp = load_reg(s, rd);
71b3c3de 3370 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3371 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3372 gen_lookup_tb(s);
3373 break;
3374 case ARM_VFP_FPINST:
3375 case ARM_VFP_FPINST2:
23adb861
PM
3376 if (IS_USER(s)) {
3377 return 1;
3378 }
e4c1cfa5 3379 tmp = load_reg(s, rd);
4373f3ce 3380 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3381 break;
b7bcbe95
FB
3382 default:
3383 return 1;
3384 }
3385 } else {
e4c1cfa5 3386 tmp = load_reg(s, rd);
4373f3ce 3387 gen_vfp_msr(tmp);
b7bcbe95
FB
3388 gen_mov_vreg_F0(0, rn);
3389 }
3390 }
3391 }
3392 } else {
3393 /* data processing */
3394 /* The opcode is in bits 23, 21, 20 and 6. */
3395 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3396 if (dp) {
3397 if (op == 15) {
3398 /* rn is opcode */
3399 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3400 } else {
3401 /* rn is register number */
9ee6e8bb 3402 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3403 }
3404
239c20c7
WN
3405 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3406 ((rn & 0x1e) == 0x6))) {
3407 /* Integer or single/half precision destination. */
9ee6e8bb 3408 rd = VFP_SREG_D(insn);
b7bcbe95 3409 } else {
9ee6e8bb 3410 VFP_DREG_D(rd, insn);
b7bcbe95 3411 }
04595bf6 3412 if (op == 15 &&
239c20c7
WN
3413 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3414 ((rn & 0x1e) == 0x4))) {
3415 /* VCVT from int or half precision is always from S reg
3416 * regardless of dp bit. VCVT with immediate frac_bits
3417 * has same format as SREG_M.
04595bf6
PM
3418 */
3419 rm = VFP_SREG_M(insn);
b7bcbe95 3420 } else {
9ee6e8bb 3421 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3422 }
3423 } else {
9ee6e8bb 3424 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3425 if (op == 15 && rn == 15) {
3426 /* Double precision destination. */
9ee6e8bb
PB
3427 VFP_DREG_D(rd, insn);
3428 } else {
3429 rd = VFP_SREG_D(insn);
3430 }
04595bf6
PM
3431 /* NB that we implicitly rely on the encoding for the frac_bits
3432 * in VCVT of fixed to float being the same as that of an SREG_M
3433 */
9ee6e8bb 3434 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3435 }
3436
69d1fc22 3437 veclen = s->vec_len;
b7bcbe95
FB
3438 if (op == 15 && rn > 3)
3439 veclen = 0;
3440
3441 /* Shut up compiler warnings. */
3442 delta_m = 0;
3443 delta_d = 0;
3444 bank_mask = 0;
3b46e624 3445
b7bcbe95
FB
3446 if (veclen > 0) {
3447 if (dp)
3448 bank_mask = 0xc;
3449 else
3450 bank_mask = 0x18;
3451
3452 /* Figure out what type of vector operation this is. */
3453 if ((rd & bank_mask) == 0) {
3454 /* scalar */
3455 veclen = 0;
3456 } else {
3457 if (dp)
69d1fc22 3458 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3459 else
69d1fc22 3460 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3461
3462 if ((rm & bank_mask) == 0) {
3463 /* mixed scalar/vector */
3464 delta_m = 0;
3465 } else {
3466 /* vector */
3467 delta_m = delta_d;
3468 }
3469 }
3470 }
3471
3472 /* Load the initial operands. */
3473 if (op == 15) {
3474 switch (rn) {
3475 case 16:
3476 case 17:
3477 /* Integer source */
3478 gen_mov_F0_vreg(0, rm);
3479 break;
3480 case 8:
3481 case 9:
3482 /* Compare */
3483 gen_mov_F0_vreg(dp, rd);
3484 gen_mov_F1_vreg(dp, rm);
3485 break;
3486 case 10:
3487 case 11:
3488 /* Compare with zero */
3489 gen_mov_F0_vreg(dp, rd);
3490 gen_vfp_F1_ld0(dp);
3491 break;
9ee6e8bb
PB
3492 case 20:
3493 case 21:
3494 case 22:
3495 case 23:
644ad806
PB
3496 case 28:
3497 case 29:
3498 case 30:
3499 case 31:
9ee6e8bb
PB
3500 /* Source and destination the same. */
3501 gen_mov_F0_vreg(dp, rd);
3502 break;
6e0c0ed1
PM
3503 case 4:
3504 case 5:
3505 case 6:
3506 case 7:
239c20c7
WN
3507 /* VCVTB, VCVTT: only present with the halfprec extension
3508 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3509 * (we choose to UNDEF)
6e0c0ed1 3510 */
d614a513
PM
3511 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3512 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3513 return 1;
3514 }
239c20c7
WN
3515 if (!extract32(rn, 1, 1)) {
3516 /* Half precision source. */
3517 gen_mov_F0_vreg(0, rm);
3518 break;
3519 }
6e0c0ed1 3520 /* Otherwise fall through */
b7bcbe95
FB
3521 default:
3522 /* One source operand. */
3523 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3524 break;
b7bcbe95
FB
3525 }
3526 } else {
3527 /* Two source operands. */
3528 gen_mov_F0_vreg(dp, rn);
3529 gen_mov_F1_vreg(dp, rm);
3530 }
3531
3532 for (;;) {
3533 /* Perform the calculation. */
3534 switch (op) {
605a6aed
PM
3535 case 0: /* VMLA: fd + (fn * fm) */
3536 /* Note that order of inputs to the add matters for NaNs */
3537 gen_vfp_F1_mul(dp);
3538 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3539 gen_vfp_add(dp);
3540 break;
605a6aed 3541 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3542 gen_vfp_mul(dp);
605a6aed
PM
3543 gen_vfp_F1_neg(dp);
3544 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3545 gen_vfp_add(dp);
3546 break;
605a6aed
PM
3547 case 2: /* VNMLS: -fd + (fn * fm) */
3548 /* Note that it isn't valid to replace (-A + B) with (B - A)
3549 * or similar plausible looking simplifications
3550 * because this will give wrong results for NaNs.
3551 */
3552 gen_vfp_F1_mul(dp);
3553 gen_mov_F0_vreg(dp, rd);
3554 gen_vfp_neg(dp);
3555 gen_vfp_add(dp);
b7bcbe95 3556 break;
605a6aed 3557 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3558 gen_vfp_mul(dp);
605a6aed
PM
3559 gen_vfp_F1_neg(dp);
3560 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3561 gen_vfp_neg(dp);
605a6aed 3562 gen_vfp_add(dp);
b7bcbe95
FB
3563 break;
3564 case 4: /* mul: fn * fm */
3565 gen_vfp_mul(dp);
3566 break;
3567 case 5: /* nmul: -(fn * fm) */
3568 gen_vfp_mul(dp);
3569 gen_vfp_neg(dp);
3570 break;
3571 case 6: /* add: fn + fm */
3572 gen_vfp_add(dp);
3573 break;
3574 case 7: /* sub: fn - fm */
3575 gen_vfp_sub(dp);
3576 break;
3577 case 8: /* div: fn / fm */
3578 gen_vfp_div(dp);
3579 break;
da97f52c
PM
3580 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3581 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3582 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3583 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3584 /* These are fused multiply-add, and must be done as one
3585 * floating point operation with no rounding between the
3586 * multiplication and addition steps.
3587 * NB that doing the negations here as separate steps is
3588 * correct : an input NaN should come out with its sign bit
3589 * flipped if it is a negated-input.
3590 */
d614a513 3591 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3592 return 1;
3593 }
3594 if (dp) {
3595 TCGv_ptr fpst;
3596 TCGv_i64 frd;
3597 if (op & 1) {
3598 /* VFNMS, VFMS */
3599 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3600 }
3601 frd = tcg_temp_new_i64();
3602 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3603 if (op & 2) {
3604 /* VFNMA, VFNMS */
3605 gen_helper_vfp_negd(frd, frd);
3606 }
3607 fpst = get_fpstatus_ptr(0);
3608 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3609 cpu_F1d, frd, fpst);
3610 tcg_temp_free_ptr(fpst);
3611 tcg_temp_free_i64(frd);
3612 } else {
3613 TCGv_ptr fpst;
3614 TCGv_i32 frd;
3615 if (op & 1) {
3616 /* VFNMS, VFMS */
3617 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3618 }
3619 frd = tcg_temp_new_i32();
3620 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3621 if (op & 2) {
3622 gen_helper_vfp_negs(frd, frd);
3623 }
3624 fpst = get_fpstatus_ptr(0);
3625 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3626 cpu_F1s, frd, fpst);
3627 tcg_temp_free_ptr(fpst);
3628 tcg_temp_free_i32(frd);
3629 }
3630 break;
9ee6e8bb 3631 case 14: /* fconst */
d614a513
PM
3632 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3633 return 1;
3634 }
9ee6e8bb
PB
3635
3636 n = (insn << 12) & 0x80000000;
3637 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3638 if (dp) {
3639 if (i & 0x40)
3640 i |= 0x3f80;
3641 else
3642 i |= 0x4000;
3643 n |= i << 16;
4373f3ce 3644 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3645 } else {
3646 if (i & 0x40)
3647 i |= 0x780;
3648 else
3649 i |= 0x800;
3650 n |= i << 19;
5b340b51 3651 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3652 }
9ee6e8bb 3653 break;
b7bcbe95
FB
3654 case 15: /* extension space */
3655 switch (rn) {
3656 case 0: /* cpy */
3657 /* no-op */
3658 break;
3659 case 1: /* abs */
3660 gen_vfp_abs(dp);
3661 break;
3662 case 2: /* neg */
3663 gen_vfp_neg(dp);
3664 break;
3665 case 3: /* sqrt */
3666 gen_vfp_sqrt(dp);
3667 break;
239c20c7 3668 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3669 tmp = gen_vfp_mrs();
3670 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3671 if (dp) {
3672 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3673 cpu_env);
3674 } else {
3675 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3676 cpu_env);
3677 }
7d1b0095 3678 tcg_temp_free_i32(tmp);
60011498 3679 break;
239c20c7 3680 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3681 tmp = gen_vfp_mrs();
3682 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3683 if (dp) {
3684 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3685 cpu_env);
3686 } else {
3687 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3688 cpu_env);
3689 }
7d1b0095 3690 tcg_temp_free_i32(tmp);
60011498 3691 break;
239c20c7 3692 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3693 tmp = tcg_temp_new_i32();
239c20c7
WN
3694 if (dp) {
3695 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3696 cpu_env);
3697 } else {
3698 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3699 cpu_env);
3700 }
60011498
PB
3701 gen_mov_F0_vreg(0, rd);
3702 tmp2 = gen_vfp_mrs();
3703 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3704 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3705 tcg_temp_free_i32(tmp2);
60011498
PB
3706 gen_vfp_msr(tmp);
3707 break;
239c20c7 3708 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3709 tmp = tcg_temp_new_i32();
239c20c7
WN
3710 if (dp) {
3711 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3712 cpu_env);
3713 } else {
3714 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3715 cpu_env);
3716 }
60011498
PB
3717 tcg_gen_shli_i32(tmp, tmp, 16);
3718 gen_mov_F0_vreg(0, rd);
3719 tmp2 = gen_vfp_mrs();
3720 tcg_gen_ext16u_i32(tmp2, tmp2);
3721 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3722 tcg_temp_free_i32(tmp2);
60011498
PB
3723 gen_vfp_msr(tmp);
3724 break;
b7bcbe95
FB
3725 case 8: /* cmp */
3726 gen_vfp_cmp(dp);
3727 break;
3728 case 9: /* cmpe */
3729 gen_vfp_cmpe(dp);
3730 break;
3731 case 10: /* cmpz */
3732 gen_vfp_cmp(dp);
3733 break;
3734 case 11: /* cmpez */
3735 gen_vfp_F1_ld0(dp);
3736 gen_vfp_cmpe(dp);
3737 break;
664c6733
WN
3738 case 12: /* vrintr */
3739 {
3740 TCGv_ptr fpst = get_fpstatus_ptr(0);
3741 if (dp) {
3742 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3743 } else {
3744 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3745 }
3746 tcg_temp_free_ptr(fpst);
3747 break;
3748 }
a290c62a
WN
3749 case 13: /* vrintz */
3750 {
3751 TCGv_ptr fpst = get_fpstatus_ptr(0);
3752 TCGv_i32 tcg_rmode;
3753 tcg_rmode = tcg_const_i32(float_round_to_zero);
3754 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3755 if (dp) {
3756 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3757 } else {
3758 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3759 }
3760 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3761 tcg_temp_free_i32(tcg_rmode);
3762 tcg_temp_free_ptr(fpst);
3763 break;
3764 }
4e82bc01
WN
3765 case 14: /* vrintx */
3766 {
3767 TCGv_ptr fpst = get_fpstatus_ptr(0);
3768 if (dp) {
3769 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3770 } else {
3771 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3772 }
3773 tcg_temp_free_ptr(fpst);
3774 break;
3775 }
b7bcbe95
FB
3776 case 15: /* single<->double conversion */
3777 if (dp)
4373f3ce 3778 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3779 else
4373f3ce 3780 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3781 break;
3782 case 16: /* fuito */
5500b06c 3783 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3784 break;
3785 case 17: /* fsito */
5500b06c 3786 gen_vfp_sito(dp, 0);
b7bcbe95 3787 break;
9ee6e8bb 3788 case 20: /* fshto */
d614a513
PM
3789 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3790 return 1;
3791 }
5500b06c 3792 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3793 break;
3794 case 21: /* fslto */
d614a513
PM
3795 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3796 return 1;
3797 }
5500b06c 3798 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3799 break;
3800 case 22: /* fuhto */
d614a513
PM
3801 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3802 return 1;
3803 }
5500b06c 3804 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3805 break;
3806 case 23: /* fulto */
d614a513
PM
3807 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3808 return 1;
3809 }
5500b06c 3810 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3811 break;
b7bcbe95 3812 case 24: /* ftoui */
5500b06c 3813 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3814 break;
3815 case 25: /* ftouiz */
5500b06c 3816 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3817 break;
3818 case 26: /* ftosi */
5500b06c 3819 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3820 break;
3821 case 27: /* ftosiz */
5500b06c 3822 gen_vfp_tosiz(dp, 0);
b7bcbe95 3823 break;
9ee6e8bb 3824 case 28: /* ftosh */
d614a513
PM
3825 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3826 return 1;
3827 }
5500b06c 3828 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3829 break;
3830 case 29: /* ftosl */
d614a513
PM
3831 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3832 return 1;
3833 }
5500b06c 3834 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3835 break;
3836 case 30: /* ftouh */
d614a513
PM
3837 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3838 return 1;
3839 }
5500b06c 3840 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3841 break;
3842 case 31: /* ftoul */
d614a513
PM
3843 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3844 return 1;
3845 }
5500b06c 3846 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3847 break;
b7bcbe95 3848 default: /* undefined */
b7bcbe95
FB
3849 return 1;
3850 }
3851 break;
3852 default: /* undefined */
b7bcbe95
FB
3853 return 1;
3854 }
3855
3856 /* Write back the result. */
239c20c7
WN
3857 if (op == 15 && (rn >= 8 && rn <= 11)) {
3858 /* Comparison, do nothing. */
3859 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3860 (rn & 0x1e) == 0x6)) {
3861 /* VCVT double to int: always integer result.
3862 * VCVT double to half precision is always a single
3863 * precision result.
3864 */
b7bcbe95 3865 gen_mov_vreg_F0(0, rd);
239c20c7 3866 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3867 /* conversion */
3868 gen_mov_vreg_F0(!dp, rd);
239c20c7 3869 } else {
b7bcbe95 3870 gen_mov_vreg_F0(dp, rd);
239c20c7 3871 }
b7bcbe95
FB
3872
3873 /* break out of the loop if we have finished */
3874 if (veclen == 0)
3875 break;
3876
3877 if (op == 15 && delta_m == 0) {
3878 /* single source one-many */
3879 while (veclen--) {
3880 rd = ((rd + delta_d) & (bank_mask - 1))
3881 | (rd & bank_mask);
3882 gen_mov_vreg_F0(dp, rd);
3883 }
3884 break;
3885 }
3886 /* Setup the next operands. */
3887 veclen--;
3888 rd = ((rd + delta_d) & (bank_mask - 1))
3889 | (rd & bank_mask);
3890
3891 if (op == 15) {
3892 /* One source operand. */
3893 rm = ((rm + delta_m) & (bank_mask - 1))
3894 | (rm & bank_mask);
3895 gen_mov_F0_vreg(dp, rm);
3896 } else {
3897 /* Two source operands. */
3898 rn = ((rn + delta_d) & (bank_mask - 1))
3899 | (rn & bank_mask);
3900 gen_mov_F0_vreg(dp, rn);
3901 if (delta_m) {
3902 rm = ((rm + delta_m) & (bank_mask - 1))
3903 | (rm & bank_mask);
3904 gen_mov_F1_vreg(dp, rm);
3905 }
3906 }
3907 }
3908 }
3909 break;
3910 case 0xc:
3911 case 0xd:
8387da81 3912 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3913 /* two-register transfer */
3914 rn = (insn >> 16) & 0xf;
3915 rd = (insn >> 12) & 0xf;
3916 if (dp) {
9ee6e8bb
PB
3917 VFP_DREG_M(rm, insn);
3918 } else {
3919 rm = VFP_SREG_M(insn);
3920 }
b7bcbe95 3921
18c9b560 3922 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3923 /* vfp->arm */
3924 if (dp) {
4373f3ce
PB
3925 gen_mov_F0_vreg(0, rm * 2);
3926 tmp = gen_vfp_mrs();
3927 store_reg(s, rd, tmp);
3928 gen_mov_F0_vreg(0, rm * 2 + 1);
3929 tmp = gen_vfp_mrs();
3930 store_reg(s, rn, tmp);
b7bcbe95
FB
3931 } else {
3932 gen_mov_F0_vreg(0, rm);
4373f3ce 3933 tmp = gen_vfp_mrs();
8387da81 3934 store_reg(s, rd, tmp);
b7bcbe95 3935 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3936 tmp = gen_vfp_mrs();
8387da81 3937 store_reg(s, rn, tmp);
b7bcbe95
FB
3938 }
3939 } else {
3940 /* arm->vfp */
3941 if (dp) {
4373f3ce
PB
3942 tmp = load_reg(s, rd);
3943 gen_vfp_msr(tmp);
3944 gen_mov_vreg_F0(0, rm * 2);
3945 tmp = load_reg(s, rn);
3946 gen_vfp_msr(tmp);
3947 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3948 } else {
8387da81 3949 tmp = load_reg(s, rd);
4373f3ce 3950 gen_vfp_msr(tmp);
b7bcbe95 3951 gen_mov_vreg_F0(0, rm);
8387da81 3952 tmp = load_reg(s, rn);
4373f3ce 3953 gen_vfp_msr(tmp);
b7bcbe95
FB
3954 gen_mov_vreg_F0(0, rm + 1);
3955 }
3956 }
3957 } else {
3958 /* Load/store */
3959 rn = (insn >> 16) & 0xf;
3960 if (dp)
9ee6e8bb 3961 VFP_DREG_D(rd, insn);
b7bcbe95 3962 else
9ee6e8bb 3963 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3964 if ((insn & 0x01200000) == 0x01000000) {
3965 /* Single load/store */
3966 offset = (insn & 0xff) << 2;
3967 if ((insn & (1 << 23)) == 0)
3968 offset = -offset;
934814f1
PM
3969 if (s->thumb && rn == 15) {
3970 /* This is actually UNPREDICTABLE */
3971 addr = tcg_temp_new_i32();
3972 tcg_gen_movi_i32(addr, s->pc & ~2);
3973 } else {
3974 addr = load_reg(s, rn);
3975 }
312eea9f 3976 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3977 if (insn & (1 << 20)) {
312eea9f 3978 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3979 gen_mov_vreg_F0(dp, rd);
3980 } else {
3981 gen_mov_F0_vreg(dp, rd);
312eea9f 3982 gen_vfp_st(s, dp, addr);
b7bcbe95 3983 }
7d1b0095 3984 tcg_temp_free_i32(addr);
b7bcbe95
FB
3985 } else {
3986 /* load/store multiple */
934814f1 3987 int w = insn & (1 << 21);
b7bcbe95
FB
3988 if (dp)
3989 n = (insn >> 1) & 0x7f;
3990 else
3991 n = insn & 0xff;
3992
934814f1
PM
3993 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3994 /* P == U , W == 1 => UNDEF */
3995 return 1;
3996 }
3997 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3998 /* UNPREDICTABLE cases for bad immediates: we choose to
3999 * UNDEF to avoid generating huge numbers of TCG ops
4000 */
4001 return 1;
4002 }
4003 if (rn == 15 && w) {
4004 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4005 return 1;
4006 }
4007
4008 if (s->thumb && rn == 15) {
4009 /* This is actually UNPREDICTABLE */
4010 addr = tcg_temp_new_i32();
4011 tcg_gen_movi_i32(addr, s->pc & ~2);
4012 } else {
4013 addr = load_reg(s, rn);
4014 }
b7bcbe95 4015 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4016 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4017
4018 if (dp)
4019 offset = 8;
4020 else
4021 offset = 4;
4022 for (i = 0; i < n; i++) {
18c9b560 4023 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4024 /* load */
312eea9f 4025 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4026 gen_mov_vreg_F0(dp, rd + i);
4027 } else {
4028 /* store */
4029 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4030 gen_vfp_st(s, dp, addr);
b7bcbe95 4031 }
312eea9f 4032 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4033 }
934814f1 4034 if (w) {
b7bcbe95
FB
4035 /* writeback */
4036 if (insn & (1 << 24))
4037 offset = -offset * n;
4038 else if (dp && (insn & 1))
4039 offset = 4;
4040 else
4041 offset = 0;
4042
4043 if (offset != 0)
312eea9f
FN
4044 tcg_gen_addi_i32(addr, addr, offset);
4045 store_reg(s, rn, addr);
4046 } else {
7d1b0095 4047 tcg_temp_free_i32(addr);
b7bcbe95
FB
4048 }
4049 }
4050 }
4051 break;
4052 default:
4053 /* Should never happen. */
4054 return 1;
4055 }
4056 return 0;
4057}
4058
90aa39a1 4059static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4060{
90aa39a1
SF
4061#ifndef CONFIG_USER_ONLY
4062 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4063 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4064#else
4065 return true;
4066#endif
4067}
6e256c93 4068
90aa39a1
SF
4069static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4070{
4071 if (use_goto_tb(s, dest)) {
57fec1fe 4072 tcg_gen_goto_tb(n);
eaed129d 4073 gen_set_pc_im(s, dest);
90aa39a1 4074 tcg_gen_exit_tb((uintptr_t)s->tb + n);
6e256c93 4075 } else {
eaed129d 4076 gen_set_pc_im(s, dest);
57fec1fe 4077 tcg_gen_exit_tb(0);
6e256c93 4078 }
c53be334
FB
4079}
4080
8aaca4c0
FB
4081static inline void gen_jmp (DisasContext *s, uint32_t dest)
4082{
50225ad0 4083 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 4084 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4085 if (s->thumb)
d9ba4830
PB
4086 dest |= 1;
4087 gen_bx_im(s, dest);
8aaca4c0 4088 } else {
6e256c93 4089 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4090 s->is_jmp = DISAS_TB_JUMP;
4091 }
4092}
4093
39d5492a 4094static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4095{
ee097184 4096 if (x)
d9ba4830 4097 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4098 else
d9ba4830 4099 gen_sxth(t0);
ee097184 4100 if (y)
d9ba4830 4101 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4102 else
d9ba4830
PB
4103 gen_sxth(t1);
4104 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4105}
4106
4107/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4108static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4109{
b5ff1b31
FB
4110 uint32_t mask;
4111
4112 mask = 0;
4113 if (flags & (1 << 0))
4114 mask |= 0xff;
4115 if (flags & (1 << 1))
4116 mask |= 0xff00;
4117 if (flags & (1 << 2))
4118 mask |= 0xff0000;
4119 if (flags & (1 << 3))
4120 mask |= 0xff000000;
9ee6e8bb 4121
2ae23e75 4122 /* Mask out undefined bits. */
9ee6e8bb 4123 mask &= ~CPSR_RESERVED;
d614a513 4124 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4125 mask &= ~CPSR_T;
d614a513
PM
4126 }
4127 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4128 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4129 }
4130 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4131 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4132 }
4133 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4134 mask &= ~CPSR_IT;
d614a513 4135 }
4051e12c
PM
4136 /* Mask out execution state and reserved bits. */
4137 if (!spsr) {
4138 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4139 }
b5ff1b31
FB
4140 /* Mask out privileged bits. */
4141 if (IS_USER(s))
9ee6e8bb 4142 mask &= CPSR_USER;
b5ff1b31
FB
4143 return mask;
4144}
4145
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write of the SPSR: only the bits selected by
         * 'mask' are replaced with the corresponding bits of t0.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* A PSR write may change state the translator depends on, so force
     * a TB lookup rather than chaining.
     */
    gen_lookup_tb(s);
    return 0;
}
4167
/* Returns nonzero if access to the PSR is not permitted. */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    /* Immediate form: materialize 'val' in a temp; gen_set_psr frees it.  */
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
4176
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4321
/* Translate MSR (banked): copy general register rn into the banked
 * register selected by (r, sysm).  Emits UNDEF handling itself when
 * the access decode fails.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* CPU state may have changed behind the translator's back.  */
    s->is_jmp = DISAS_UPDATE;
}
4343
/* Translate MRS (banked): copy the banked register selected by
 * (r, sysm) into general register rn.  Emits UNDEF handling itself
 * when the access decode fails.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    /* CPU state may have changed behind the translator's back.  */
    s->is_jmp = DISAS_UPDATE;
}
4365
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 * Consumes (frees) the pc temp.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4375
/* Generate a v6 exception return.  Marks both values as dead.
 * Ends the TB: the caller must not emit further code for this insn.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4388
/* Generate an old-style exception return. Marks pc as dead.
 * The new CPSR is taken from the current mode's SPSR.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4394
/* Translate a NOP-space hint instruction with hint value 'val'.
 * YIELD/WFI/WFE end the TB (after setting the PC past this insn)
 * so the main loop can act on them; everything else is a no-op.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
        /* fall through: currently treated as nops */
    default: /* nop */
        break;
    }
}
99c475ab 4417
ad69471c 4418#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4419
39d5492a 4420static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4421{
4422 switch (size) {
dd8fbd78
FN
4423 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4424 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4425 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4426 default: abort();
9ee6e8bb 4427 }
9ee6e8bb
PB
4428}
4429
39d5492a 4430static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4431{
4432 switch (size) {
dd8fbd78
FN
4433 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4434 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4435 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4436 default: return;
4437 }
4438}
4439
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Call the signed/unsigned 8/16/32-bit variant of helper 'name' that
 * takes cpu_env, selected by the enclosing function's local 'size' and
 * 'u' variables; operands and result use the locals 'tmp' and 'tmp2'.
 * Invalid (size, u) combinations expand to 'return 1' (UNDEF).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4491
39d5492a 4492static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4493{
39d5492a 4494 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4495 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4496 return tmp;
9ee6e8bb
PB
4497}
4498
/* Store 'var' into vfp.scratch[scratch]; consumes (frees) var.  */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4504
39d5492a 4505static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4506{
39d5492a 4507 TCGv_i32 tmp;
9ee6e8bb 4508 if (size == 1) {
0fad6efc
PM
4509 tmp = neon_load_reg(reg & 7, reg >> 4);
4510 if (reg & 8) {
dd8fbd78 4511 gen_neon_dup_high16(tmp);
0fad6efc
PM
4512 } else {
4513 gen_neon_dup_low16(tmp);
dd8fbd78 4514 }
0fad6efc
PM
4515 } else {
4516 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4517 }
dd8fbd78 4518 return tmp;
9ee6e8bb
PB
4519}
4520
02acedf9 4521static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4522{
39d5492a 4523 TCGv_i32 tmp, tmp2;
600b828c 4524 if (!q && size == 2) {
02acedf9
PM
4525 return 1;
4526 }
4527 tmp = tcg_const_i32(rd);
4528 tmp2 = tcg_const_i32(rm);
4529 if (q) {
4530 switch (size) {
4531 case 0:
02da0b2d 4532 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4533 break;
4534 case 1:
02da0b2d 4535 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4536 break;
4537 case 2:
02da0b2d 4538 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4539 break;
4540 default:
4541 abort();
4542 }
4543 } else {
4544 switch (size) {
4545 case 0:
02da0b2d 4546 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4547 break;
4548 case 1:
02da0b2d 4549 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4550 break;
4551 default:
4552 abort();
4553 }
4554 }
4555 tcg_temp_free_i32(tmp);
4556 tcg_temp_free_i32(tmp2);
4557 return 0;
19457615
FN
4558}
4559
d68a6f3a 4560static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4561{
39d5492a 4562 TCGv_i32 tmp, tmp2;
600b828c 4563 if (!q && size == 2) {
d68a6f3a
PM
4564 return 1;
4565 }
4566 tmp = tcg_const_i32(rd);
4567 tmp2 = tcg_const_i32(rm);
4568 if (q) {
4569 switch (size) {
4570 case 0:
02da0b2d 4571 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4572 break;
4573 case 1:
02da0b2d 4574 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4575 break;
4576 case 2:
02da0b2d 4577 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4578 break;
4579 default:
4580 abort();
4581 }
4582 } else {
4583 switch (size) {
4584 case 0:
02da0b2d 4585 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4586 break;
4587 case 1:
02da0b2d 4588 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4589 break;
4590 default:
4591 abort();
4592 }
4593 }
4594 tcg_temp_free_i32(tmp);
4595 tcg_temp_free_i32(tmp2);
4596 return 0;
19457615
FN
4597}
4598
/* 8-bit element transpose step: in place, exchange bytes between t0
 * and t1 so that afterwards t0 = {t0[2], t1[2], t0[0], t1[0]} and
 * t1 = {t0[3], t1[3], t0[1], t1[1]} (byte 0 = least significant).
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = even bytes of t0 shifted into the odd lanes, combined with
     * the even bytes of t1.
     */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = odd bytes of t1 shifted into the even lanes, combined with
     * the odd bytes of t0 kept in place.
     */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4620
/* 16-bit element transpose step: in place, afterwards
 * t0 = {t0.low, t1.low} (t0.low in the high half) and
 * t1 = {t0.high, t1.high} (t1.high in the low half).
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0.low << 16) | t1.low */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = (t0.high << 16) | t1.high */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4639
4640
/* Properties of the Neon "load/store multiple structures" forms,
 * indexed by the op field (insn bits [11:8], valid values 0..10):
 * number of registers transferred, element interleave factor, and
 * D-register spacing between consecutive registers.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4658
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved transfers re-seed the address from rn with a
             * per-register offset instead of running it linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: two 32-bit passes per D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one word.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses packed into one word.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the target lane,
                         * preserving the other lanes of the register word.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Writeback: rm == 15 means none; rm == 13 post-indexes by the
     * transfer size; any other rm post-indexes by that register.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4987
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* dest = (t & c) | (f & ~c): each result bit comes from t where
     * the corresponding bit of c is set, otherwise from f.
     */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4995
39d5492a 4996static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4997{
4998 switch (size) {
4999 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5000 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5001 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5002 default: abort();
5003 }
5004}
5005
39d5492a 5006static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5007{
5008 switch (size) {
02da0b2d
PM
5009 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5010 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5011 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5012 default: abort();
5013 }
5014}
5015
39d5492a 5016static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5017{
5018 switch (size) {
02da0b2d
PM
5019 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5020 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5021 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5022 default: abort();
5023 }
5024}
5025
39d5492a 5026static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5027{
5028 switch (size) {
02da0b2d
PM
5029 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5030 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5031 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5032 default: abort();
5033 }
5034}
5035
39d5492a 5036static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5037 int q, int u)
5038{
5039 if (q) {
5040 if (u) {
5041 switch (size) {
5042 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5043 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5044 default: abort();
5045 }
5046 } else {
5047 switch (size) {
5048 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5049 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5050 default: abort();
5051 }
5052 }
5053 } else {
5054 if (u) {
5055 switch (size) {
b408a9b0
CL
5056 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5057 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5058 default: abort();
5059 }
5060 } else {
5061 switch (size) {
5062 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5063 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5064 default: abort();
5065 }
5066 }
5067 }
5068}
5069
39d5492a 5070static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5071{
5072 if (u) {
5073 switch (size) {
5074 case 0: gen_helper_neon_widen_u8(dest, src); break;
5075 case 1: gen_helper_neon_widen_u16(dest, src); break;
5076 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5077 default: abort();
5078 }
5079 } else {
5080 switch (size) {
5081 case 0: gen_helper_neon_widen_s8(dest, src); break;
5082 case 1: gen_helper_neon_widen_s16(dest, src); break;
5083 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5084 default: abort();
5085 }
5086 }
7d1b0095 5087 tcg_temp_free_i32(src);
ad69471c
PB
5088}
5089
5090static inline void gen_neon_addl(int size)
5091{
5092 switch (size) {
5093 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5094 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5095 case 2: tcg_gen_add_i64(CPU_V001); break;
5096 default: abort();
5097 }
5098}
5099
5100static inline void gen_neon_subl(int size)
5101{
5102 switch (size) {
5103 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5104 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5105 case 2: tcg_gen_sub_i64(CPU_V001); break;
5106 default: abort();
5107 }
5108}
5109
a7812ae4 5110static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5111{
5112 switch (size) {
5113 case 0: gen_helper_neon_negl_u16(var, var); break;
5114 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5115 case 2:
5116 tcg_gen_neg_i64(var, var);
5117 break;
ad69471c
PB
5118 default: abort();
5119 }
5120}
5121
a7812ae4 5122static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5123{
5124 switch (size) {
02da0b2d
PM
5125 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5126 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5127 default: abort();
5128 }
5129}
5130
39d5492a
PM
5131static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5132 int size, int u)
ad69471c 5133{
a7812ae4 5134 TCGv_i64 tmp;
ad69471c
PB
5135
5136 switch ((size << 1) | u) {
5137 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5138 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5139 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5140 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5141 case 4:
5142 tmp = gen_muls_i64_i32(a, b);
5143 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5144 tcg_temp_free_i64(tmp);
ad69471c
PB
5145 break;
5146 case 5:
5147 tmp = gen_mulu_i64_i32(a, b);
5148 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5149 tcg_temp_free_i64(tmp);
ad69471c
PB
5150 break;
5151 default: abort();
5152 }
c6067f04
CL
5153
5154 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5155 Don't forget to clean them now. */
5156 if (size < 2) {
7d1b0095
PM
5157 tcg_temp_free_i32(a);
5158 tcg_temp_free_i32(b);
c6067f04 5159 }
ad69471c
PB
5160}
5161
39d5492a
PM
5162static void gen_neon_narrow_op(int op, int u, int size,
5163 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5164{
5165 if (op) {
5166 if (u) {
5167 gen_neon_unarrow_sats(size, dest, src);
5168 } else {
5169 gen_neon_narrow(size, dest, src);
5170 }
5171 } else {
5172 if (u) {
5173 gen_neon_narrow_satu(size, dest, src);
5174 } else {
5175 gen_neon_narrow_sats(size, dest, src);
5176 }
5177 }
5178}
5179
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n of each entry is set if the insn allows size value n;
 * some entries use the size field as an op-type selector instead.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5251
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps (e.g. 3, 29) are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5318
5319static int neon_2rm_is_float_op(int op)
5320{
5321 /* Return true if this neon 2reg-misc op is float-to-float */
5322 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5323 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5324 op == NEON_2RM_VRINTM ||
5325 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5326 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5327}
5328
fe8fcf3d
PM
5329static bool neon_2rm_is_v8_op(int op)
5330{
5331 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5332 switch (op) {
5333 case NEON_2RM_VRINTN:
5334 case NEON_2RM_VRINTA:
5335 case NEON_2RM_VRINTM:
5336 case NEON_2RM_VRINTP:
5337 case NEON_2RM_VRINTZ:
5338 case NEON_2RM_VRINTX:
5339 case NEON_2RM_VCVTAU:
5340 case NEON_2RM_VCVTAS:
5341 case NEON_2RM_VCVTNU:
5342 case NEON_2RM_VCVTNS:
5343 case NEON_2RM_VCVTPU:
5344 case NEON_2RM_VCVTPS:
5345 case NEON_2RM_VCVTMU:
5346 case NEON_2RM_VCVTMS:
5347 return true;
5348 default:
5349 return false;
5350 }
5351}
5352
600b828c
PM
5353/* Each entry in this array has bit n set if the insn allows
5354 * size value n (otherwise it will UNDEF). Since unallocated
5355 * op values will have no bits set they always UNDEF.
5356 */
5357static const uint8_t neon_2rm_sizes[] = {
5358 [NEON_2RM_VREV64] = 0x7,
5359 [NEON_2RM_VREV32] = 0x3,
5360 [NEON_2RM_VREV16] = 0x1,
5361 [NEON_2RM_VPADDL] = 0x7,
5362 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
5363 [NEON_2RM_AESE] = 0x1,
5364 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
5365 [NEON_2RM_VCLS] = 0x7,
5366 [NEON_2RM_VCLZ] = 0x7,
5367 [NEON_2RM_VCNT] = 0x1,
5368 [NEON_2RM_VMVN] = 0x1,
5369 [NEON_2RM_VPADAL] = 0x7,
5370 [NEON_2RM_VPADAL_U] = 0x7,
5371 [NEON_2RM_VQABS] = 0x7,
5372 [NEON_2RM_VQNEG] = 0x7,
5373 [NEON_2RM_VCGT0] = 0x7,
5374 [NEON_2RM_VCGE0] = 0x7,
5375 [NEON_2RM_VCEQ0] = 0x7,
5376 [NEON_2RM_VCLE0] = 0x7,
5377 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 5378 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
5379 [NEON_2RM_VABS] = 0x7,
5380 [NEON_2RM_VNEG] = 0x7,
5381 [NEON_2RM_VCGT0_F] = 0x4,
5382 [NEON_2RM_VCGE0_F] = 0x4,
5383 [NEON_2RM_VCEQ0_F] = 0x4,
5384 [NEON_2RM_VCLE0_F] = 0x4,
5385 [NEON_2RM_VCLT0_F] = 0x4,
5386 [NEON_2RM_VABS_F] = 0x4,
5387 [NEON_2RM_VNEG_F] = 0x4,
5388 [NEON_2RM_VSWP] = 0x1,
5389 [NEON_2RM_VTRN] = 0x7,
5390 [NEON_2RM_VUZP] = 0x7,
5391 [NEON_2RM_VZIP] = 0x7,
5392 [NEON_2RM_VMOVN] = 0x7,
5393 [NEON_2RM_VQMOVN] = 0x7,
5394 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 5395 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 5396 [NEON_2RM_VRINTN] = 0x4,
2ce70625 5397 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
5398 [NEON_2RM_VRINTA] = 0x4,
5399 [NEON_2RM_VRINTZ] = 0x4,
600b828c 5400 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 5401 [NEON_2RM_VRINTM] = 0x4,
600b828c 5402 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 5403 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
5404 [NEON_2RM_VCVTAU] = 0x4,
5405 [NEON_2RM_VCVTAS] = 0x4,
5406 [NEON_2RM_VCVTNU] = 0x4,
5407 [NEON_2RM_VCVTNS] = 0x4,
5408 [NEON_2RM_VCVTPU] = 0x4,
5409 [NEON_2RM_VCVTPS] = 0x4,
5410 [NEON_2RM_VCVTMU] = 0x4,
5411 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
5412 [NEON_2RM_VRECPE] = 0x4,
5413 [NEON_2RM_VRSQRTE] = 0x4,
5414 [NEON_2RM_VRECPE_F] = 0x4,
5415 [NEON_2RM_VRSQRTE_F] = 0x4,
5416 [NEON_2RM_VCVT_FS] = 0x4,
5417 [NEON_2RM_VCVT_FU] = 0x4,
5418 [NEON_2RM_VCVT_SF] = 0x4,
5419 [NEON_2RM_VCVT_UF] = 0x4,
5420};
5421
9ee6e8bb
PB
5422/* Translate a NEON data processing instruction. Return nonzero if the
5423 instruction is invalid.
ad69471c
PB
5424 We process data in a mixture of 32-bit and 64-bit chunks.
5425 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5426
7dcc1f89 5427static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5428{
5429 int op;
5430 int q;
5431 int rd, rn, rm;
5432 int size;
5433 int shift;
5434 int pass;
5435 int count;
5436 int pairwise;
5437 int u;
ca9a32e4 5438 uint32_t imm, mask;
39d5492a 5439 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5440 TCGv_i64 tmp64;
9ee6e8bb 5441
2c7ffc41
PM
5442 /* FIXME: this access check should not take precedence over UNDEF
5443 * for invalid encodings; we will generate incorrect syndrome information
5444 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5445 */
9dbbc748 5446 if (s->fp_excp_el) {
2c7ffc41 5447 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5448 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5449 return 0;
5450 }
5451
5df8bac1 5452 if (!s->vfp_enabled)
9ee6e8bb
PB
5453 return 1;
5454 q = (insn & (1 << 6)) != 0;
5455 u = (insn >> 24) & 1;
5456 VFP_DREG_D(rd, insn);
5457 VFP_DREG_N(rn, insn);
5458 VFP_DREG_M(rm, insn);
5459 size = (insn >> 20) & 3;
5460 if ((insn & (1 << 23)) == 0) {
5461 /* Three register same length. */
5462 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5463 /* Catch invalid op and bad size combinations: UNDEF */
5464 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5465 return 1;
5466 }
25f84f79
PM
5467 /* All insns of this form UNDEF for either this condition or the
5468 * superset of cases "Q==1"; we catch the latter later.
5469 */
5470 if (q && ((rd | rn | rm) & 1)) {
5471 return 1;
5472 }
f1ecb913
AB
5473 /*
5474 * The SHA-1/SHA-256 3-register instructions require special treatment
5475 * here, as their size field is overloaded as an op type selector, and
5476 * they all consume their input in a single pass.
5477 */
5478 if (op == NEON_3R_SHA) {
5479 if (!q) {
5480 return 1;
5481 }
5482 if (!u) { /* SHA-1 */
d614a513 5483 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5484 return 1;
5485 }
5486 tmp = tcg_const_i32(rd);
5487 tmp2 = tcg_const_i32(rn);
5488 tmp3 = tcg_const_i32(rm);
5489 tmp4 = tcg_const_i32(size);
5490 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5491 tcg_temp_free_i32(tmp4);
5492 } else { /* SHA-256 */
d614a513 5493 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5494 return 1;
5495 }
5496 tmp = tcg_const_i32(rd);
5497 tmp2 = tcg_const_i32(rn);
5498 tmp3 = tcg_const_i32(rm);
5499 switch (size) {
5500 case 0:
5501 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5502 break;
5503 case 1:
5504 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5505 break;
5506 case 2:
5507 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5508 break;
5509 }
5510 }
5511 tcg_temp_free_i32(tmp);
5512 tcg_temp_free_i32(tmp2);
5513 tcg_temp_free_i32(tmp3);
5514 return 0;
5515 }
62698be3
PM
5516 if (size == 3 && op != NEON_3R_LOGIC) {
5517 /* 64-bit element instructions. */
9ee6e8bb 5518 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5519 neon_load_reg64(cpu_V0, rn + pass);
5520 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5521 switch (op) {
62698be3 5522 case NEON_3R_VQADD:
9ee6e8bb 5523 if (u) {
02da0b2d
PM
5524 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5525 cpu_V0, cpu_V1);
2c0262af 5526 } else {
02da0b2d
PM
5527 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5528 cpu_V0, cpu_V1);
2c0262af 5529 }
9ee6e8bb 5530 break;
62698be3 5531 case NEON_3R_VQSUB:
9ee6e8bb 5532 if (u) {
02da0b2d
PM
5533 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5534 cpu_V0, cpu_V1);
ad69471c 5535 } else {
02da0b2d
PM
5536 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5537 cpu_V0, cpu_V1);
ad69471c
PB
5538 }
5539 break;
62698be3 5540 case NEON_3R_VSHL:
ad69471c
PB
5541 if (u) {
5542 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5543 } else {
5544 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5545 }
5546 break;
62698be3 5547 case NEON_3R_VQSHL:
ad69471c 5548 if (u) {
02da0b2d
PM
5549 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5550 cpu_V1, cpu_V0);
ad69471c 5551 } else {
02da0b2d
PM
5552 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5553 cpu_V1, cpu_V0);
ad69471c
PB
5554 }
5555 break;
62698be3 5556 case NEON_3R_VRSHL:
ad69471c
PB
5557 if (u) {
5558 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5559 } else {
ad69471c
PB
5560 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5561 }
5562 break;
62698be3 5563 case NEON_3R_VQRSHL:
ad69471c 5564 if (u) {
02da0b2d
PM
5565 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5566 cpu_V1, cpu_V0);
ad69471c 5567 } else {
02da0b2d
PM
5568 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5569 cpu_V1, cpu_V0);
1e8d4eec 5570 }
9ee6e8bb 5571 break;
62698be3 5572 case NEON_3R_VADD_VSUB:
9ee6e8bb 5573 if (u) {
ad69471c 5574 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5575 } else {
ad69471c 5576 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5577 }
5578 break;
5579 default:
5580 abort();
2c0262af 5581 }
ad69471c 5582 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5583 }
9ee6e8bb 5584 return 0;
2c0262af 5585 }
25f84f79 5586 pairwise = 0;
9ee6e8bb 5587 switch (op) {
62698be3
PM
5588 case NEON_3R_VSHL:
5589 case NEON_3R_VQSHL:
5590 case NEON_3R_VRSHL:
5591 case NEON_3R_VQRSHL:
9ee6e8bb 5592 {
ad69471c
PB
5593 int rtmp;
5594 /* Shift instruction operands are reversed. */
5595 rtmp = rn;
9ee6e8bb 5596 rn = rm;
ad69471c 5597 rm = rtmp;
9ee6e8bb 5598 }
2c0262af 5599 break;
25f84f79
PM
5600 case NEON_3R_VPADD:
5601 if (u) {
5602 return 1;
5603 }
5604 /* Fall through */
62698be3
PM
5605 case NEON_3R_VPMAX:
5606 case NEON_3R_VPMIN:
9ee6e8bb 5607 pairwise = 1;
2c0262af 5608 break;
25f84f79
PM
5609 case NEON_3R_FLOAT_ARITH:
5610 pairwise = (u && size < 2); /* if VPADD (float) */
5611 break;
5612 case NEON_3R_FLOAT_MINMAX:
5613 pairwise = u; /* if VPMIN/VPMAX (float) */
5614 break;
5615 case NEON_3R_FLOAT_CMP:
5616 if (!u && size) {
5617 /* no encoding for U=0 C=1x */
5618 return 1;
5619 }
5620 break;
5621 case NEON_3R_FLOAT_ACMP:
5622 if (!u) {
5623 return 1;
5624 }
5625 break;
505935fc
WN
5626 case NEON_3R_FLOAT_MISC:
5627 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5628 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5629 return 1;
5630 }
2c0262af 5631 break;
25f84f79
PM
5632 case NEON_3R_VMUL:
5633 if (u && (size != 0)) {
5634 /* UNDEF on invalid size for polynomial subcase */
5635 return 1;
5636 }
2c0262af 5637 break;
da97f52c 5638 case NEON_3R_VFM:
d614a513 5639 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5640 return 1;
5641 }
5642 break;
9ee6e8bb 5643 default:
2c0262af 5644 break;
9ee6e8bb 5645 }
dd8fbd78 5646
25f84f79
PM
5647 if (pairwise && q) {
5648 /* All the pairwise insns UNDEF if Q is set */
5649 return 1;
5650 }
5651
9ee6e8bb
PB
5652 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5653
5654 if (pairwise) {
5655 /* Pairwise. */
a5a14945
JR
5656 if (pass < 1) {
5657 tmp = neon_load_reg(rn, 0);
5658 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5659 } else {
a5a14945
JR
5660 tmp = neon_load_reg(rm, 0);
5661 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5662 }
5663 } else {
5664 /* Elementwise. */
dd8fbd78
FN
5665 tmp = neon_load_reg(rn, pass);
5666 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5667 }
5668 switch (op) {
62698be3 5669 case NEON_3R_VHADD:
9ee6e8bb
PB
5670 GEN_NEON_INTEGER_OP(hadd);
5671 break;
62698be3 5672 case NEON_3R_VQADD:
02da0b2d 5673 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5674 break;
62698be3 5675 case NEON_3R_VRHADD:
9ee6e8bb 5676 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5677 break;
62698be3 5678 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5679 switch ((u << 2) | size) {
5680 case 0: /* VAND */
dd8fbd78 5681 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5682 break;
5683 case 1: /* BIC */
f669df27 5684 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5685 break;
5686 case 2: /* VORR */
dd8fbd78 5687 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5688 break;
5689 case 3: /* VORN */
f669df27 5690 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5691 break;
5692 case 4: /* VEOR */
dd8fbd78 5693 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5694 break;
5695 case 5: /* VBSL */
dd8fbd78
FN
5696 tmp3 = neon_load_reg(rd, pass);
5697 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5698 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5699 break;
5700 case 6: /* VBIT */
dd8fbd78
FN
5701 tmp3 = neon_load_reg(rd, pass);
5702 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5703 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5704 break;
5705 case 7: /* VBIF */
dd8fbd78
FN
5706 tmp3 = neon_load_reg(rd, pass);
5707 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5708 tcg_temp_free_i32(tmp3);
9ee6e8bb 5709 break;
2c0262af
FB
5710 }
5711 break;
62698be3 5712 case NEON_3R_VHSUB:
9ee6e8bb
PB
5713 GEN_NEON_INTEGER_OP(hsub);
5714 break;
62698be3 5715 case NEON_3R_VQSUB:
02da0b2d 5716 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5717 break;
62698be3 5718 case NEON_3R_VCGT:
9ee6e8bb
PB
5719 GEN_NEON_INTEGER_OP(cgt);
5720 break;
62698be3 5721 case NEON_3R_VCGE:
9ee6e8bb
PB
5722 GEN_NEON_INTEGER_OP(cge);
5723 break;
62698be3 5724 case NEON_3R_VSHL:
ad69471c 5725 GEN_NEON_INTEGER_OP(shl);
2c0262af 5726 break;
62698be3 5727 case NEON_3R_VQSHL:
02da0b2d 5728 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5729 break;
62698be3 5730 case NEON_3R_VRSHL:
ad69471c 5731 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5732 break;
62698be3 5733 case NEON_3R_VQRSHL:
02da0b2d 5734 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5735 break;
62698be3 5736 case NEON_3R_VMAX:
9ee6e8bb
PB
5737 GEN_NEON_INTEGER_OP(max);
5738 break;
62698be3 5739 case NEON_3R_VMIN:
9ee6e8bb
PB
5740 GEN_NEON_INTEGER_OP(min);
5741 break;
62698be3 5742 case NEON_3R_VABD:
9ee6e8bb
PB
5743 GEN_NEON_INTEGER_OP(abd);
5744 break;
62698be3 5745 case NEON_3R_VABA:
9ee6e8bb 5746 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5747 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5748 tmp2 = neon_load_reg(rd, pass);
5749 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5750 break;
62698be3 5751 case NEON_3R_VADD_VSUB:
9ee6e8bb 5752 if (!u) { /* VADD */
62698be3 5753 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5754 } else { /* VSUB */
5755 switch (size) {
dd8fbd78
FN
5756 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5757 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5758 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5759 default: abort();
9ee6e8bb
PB
5760 }
5761 }
5762 break;
62698be3 5763 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5764 if (!u) { /* VTST */
5765 switch (size) {
dd8fbd78
FN
5766 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5767 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5768 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5769 default: abort();
9ee6e8bb
PB
5770 }
5771 } else { /* VCEQ */
5772 switch (size) {
dd8fbd78
FN
5773 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5774 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5775 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5776 default: abort();
9ee6e8bb
PB
5777 }
5778 }
5779 break;
62698be3 5780 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5781 switch (size) {
dd8fbd78
FN
5782 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5783 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5784 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5785 default: abort();
9ee6e8bb 5786 }
7d1b0095 5787 tcg_temp_free_i32(tmp2);
dd8fbd78 5788 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5789 if (u) { /* VMLS */
dd8fbd78 5790 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5791 } else { /* VMLA */
dd8fbd78 5792 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5793 }
5794 break;
62698be3 5795 case NEON_3R_VMUL:
9ee6e8bb 5796 if (u) { /* polynomial */
dd8fbd78 5797 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5798 } else { /* Integer */
5799 switch (size) {
dd8fbd78
FN
5800 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5801 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5802 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5803 default: abort();
9ee6e8bb
PB
5804 }
5805 }
5806 break;
62698be3 5807 case NEON_3R_VPMAX:
9ee6e8bb
PB
5808 GEN_NEON_INTEGER_OP(pmax);
5809 break;
62698be3 5810 case NEON_3R_VPMIN:
9ee6e8bb
PB
5811 GEN_NEON_INTEGER_OP(pmin);
5812 break;
62698be3 5813 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5814 if (!u) { /* VQDMULH */
5815 switch (size) {
02da0b2d
PM
5816 case 1:
5817 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5818 break;
5819 case 2:
5820 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5821 break;
62698be3 5822 default: abort();
9ee6e8bb 5823 }
62698be3 5824 } else { /* VQRDMULH */
9ee6e8bb 5825 switch (size) {
02da0b2d
PM
5826 case 1:
5827 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5828 break;
5829 case 2:
5830 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5831 break;
62698be3 5832 default: abort();
9ee6e8bb
PB
5833 }
5834 }
5835 break;
62698be3 5836 case NEON_3R_VPADD:
9ee6e8bb 5837 switch (size) {
dd8fbd78
FN
5838 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5839 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5840 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5841 default: abort();
9ee6e8bb
PB
5842 }
5843 break;
62698be3 5844 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5845 {
5846 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5847 switch ((u << 2) | size) {
5848 case 0: /* VADD */
aa47cfdd
PM
5849 case 4: /* VPADD */
5850 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5851 break;
5852 case 2: /* VSUB */
aa47cfdd 5853 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5854 break;
5855 case 6: /* VABD */
aa47cfdd 5856 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5857 break;
5858 default:
62698be3 5859 abort();
9ee6e8bb 5860 }
aa47cfdd 5861 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5862 break;
aa47cfdd 5863 }
62698be3 5864 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5865 {
5866 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5867 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5868 if (!u) {
7d1b0095 5869 tcg_temp_free_i32(tmp2);
dd8fbd78 5870 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5871 if (size == 0) {
aa47cfdd 5872 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5873 } else {
aa47cfdd 5874 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5875 }
5876 }
aa47cfdd 5877 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5878 break;
aa47cfdd 5879 }
62698be3 5880 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5881 {
5882 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5883 if (!u) {
aa47cfdd 5884 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5885 } else {
aa47cfdd
PM
5886 if (size == 0) {
5887 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5888 } else {
5889 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5890 }
b5ff1b31 5891 }
aa47cfdd 5892 tcg_temp_free_ptr(fpstatus);
2c0262af 5893 break;
aa47cfdd 5894 }
62698be3 5895 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5896 {
5897 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5898 if (size == 0) {
5899 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5900 } else {
5901 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5902 }
5903 tcg_temp_free_ptr(fpstatus);
2c0262af 5904 break;
aa47cfdd 5905 }
62698be3 5906 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5907 {
5908 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5909 if (size == 0) {
f71a2ae5 5910 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5911 } else {
f71a2ae5 5912 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5913 }
5914 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5915 break;
aa47cfdd 5916 }
505935fc
WN
5917 case NEON_3R_FLOAT_MISC:
5918 if (u) {
5919 /* VMAXNM/VMINNM */
5920 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5921 if (size == 0) {
f71a2ae5 5922 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5923 } else {
f71a2ae5 5924 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5925 }
5926 tcg_temp_free_ptr(fpstatus);
5927 } else {
5928 if (size == 0) {
5929 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5930 } else {
5931 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5932 }
5933 }
2c0262af 5934 break;
da97f52c
PM
5935 case NEON_3R_VFM:
5936 {
5937 /* VFMA, VFMS: fused multiply-add */
5938 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5939 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5940 if (size) {
5941 /* VFMS */
5942 gen_helper_vfp_negs(tmp, tmp);
5943 }
5944 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5945 tcg_temp_free_i32(tmp3);
5946 tcg_temp_free_ptr(fpstatus);
5947 break;
5948 }
9ee6e8bb
PB
5949 default:
5950 abort();
2c0262af 5951 }
7d1b0095 5952 tcg_temp_free_i32(tmp2);
dd8fbd78 5953
9ee6e8bb
PB
5954 /* Save the result. For elementwise operations we can put it
5955 straight into the destination register. For pairwise operations
5956 we have to be careful to avoid clobbering the source operands. */
5957 if (pairwise && rd == rm) {
dd8fbd78 5958 neon_store_scratch(pass, tmp);
9ee6e8bb 5959 } else {
dd8fbd78 5960 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5961 }
5962
5963 } /* for pass */
5964 if (pairwise && rd == rm) {
5965 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5966 tmp = neon_load_scratch(pass);
5967 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5968 }
5969 }
ad69471c 5970 /* End of 3 register same size operations. */
9ee6e8bb
PB
5971 } else if (insn & (1 << 4)) {
5972 if ((insn & 0x00380080) != 0) {
5973 /* Two registers and shift. */
5974 op = (insn >> 8) & 0xf;
5975 if (insn & (1 << 7)) {
cc13115b
PM
5976 /* 64-bit shift. */
5977 if (op > 7) {
5978 return 1;
5979 }
9ee6e8bb
PB
5980 size = 3;
5981 } else {
5982 size = 2;
5983 while ((insn & (1 << (size + 19))) == 0)
5984 size--;
5985 }
5986 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5987 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5988 by immediate using the variable shift operations. */
5989 if (op < 8) {
5990 /* Shift by immediate:
5991 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5992 if (q && ((rd | rm) & 1)) {
5993 return 1;
5994 }
5995 if (!u && (op == 4 || op == 6)) {
5996 return 1;
5997 }
9ee6e8bb
PB
5998 /* Right shifts are encoded as N - shift, where N is the
5999 element size in bits. */
6000 if (op <= 4)
6001 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6002 if (size == 3) {
6003 count = q + 1;
6004 } else {
6005 count = q ? 4: 2;
6006 }
6007 switch (size) {
6008 case 0:
6009 imm = (uint8_t) shift;
6010 imm |= imm << 8;
6011 imm |= imm << 16;
6012 break;
6013 case 1:
6014 imm = (uint16_t) shift;
6015 imm |= imm << 16;
6016 break;
6017 case 2:
6018 case 3:
6019 imm = shift;
6020 break;
6021 default:
6022 abort();
6023 }
6024
6025 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6026 if (size == 3) {
6027 neon_load_reg64(cpu_V0, rm + pass);
6028 tcg_gen_movi_i64(cpu_V1, imm);
6029 switch (op) {
6030 case 0: /* VSHR */
6031 case 1: /* VSRA */
6032 if (u)
6033 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6034 else
ad69471c 6035 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6036 break;
ad69471c
PB
6037 case 2: /* VRSHR */
6038 case 3: /* VRSRA */
6039 if (u)
6040 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6041 else
ad69471c 6042 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6043 break;
ad69471c 6044 case 4: /* VSRI */
ad69471c
PB
6045 case 5: /* VSHL, VSLI */
6046 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6047 break;
0322b26e 6048 case 6: /* VQSHLU */
02da0b2d
PM
6049 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6050 cpu_V0, cpu_V1);
ad69471c 6051 break;
0322b26e
PM
6052 case 7: /* VQSHL */
6053 if (u) {
02da0b2d 6054 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6055 cpu_V0, cpu_V1);
6056 } else {
02da0b2d 6057 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6058 cpu_V0, cpu_V1);
6059 }
9ee6e8bb 6060 break;
9ee6e8bb 6061 }
ad69471c
PB
6062 if (op == 1 || op == 3) {
6063 /* Accumulate. */
5371cb81 6064 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6065 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6066 } else if (op == 4 || (op == 5 && u)) {
6067 /* Insert */
923e6509
CL
6068 neon_load_reg64(cpu_V1, rd + pass);
6069 uint64_t mask;
6070 if (shift < -63 || shift > 63) {
6071 mask = 0;
6072 } else {
6073 if (op == 4) {
6074 mask = 0xffffffffffffffffull >> -shift;
6075 } else {
6076 mask = 0xffffffffffffffffull << shift;
6077 }
6078 }
6079 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6080 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6081 }
6082 neon_store_reg64(cpu_V0, rd + pass);
6083 } else { /* size < 3 */
6084 /* Operands in T0 and T1. */
dd8fbd78 6085 tmp = neon_load_reg(rm, pass);
7d1b0095 6086 tmp2 = tcg_temp_new_i32();
dd8fbd78 6087 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6088 switch (op) {
6089 case 0: /* VSHR */
6090 case 1: /* VSRA */
6091 GEN_NEON_INTEGER_OP(shl);
6092 break;
6093 case 2: /* VRSHR */
6094 case 3: /* VRSRA */
6095 GEN_NEON_INTEGER_OP(rshl);
6096 break;
6097 case 4: /* VSRI */
ad69471c
PB
6098 case 5: /* VSHL, VSLI */
6099 switch (size) {
dd8fbd78
FN
6100 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6101 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6102 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6103 default: abort();
ad69471c
PB
6104 }
6105 break;
0322b26e 6106 case 6: /* VQSHLU */
ad69471c 6107 switch (size) {
0322b26e 6108 case 0:
02da0b2d
PM
6109 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6110 tmp, tmp2);
0322b26e
PM
6111 break;
6112 case 1:
02da0b2d
PM
6113 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6114 tmp, tmp2);
0322b26e
PM
6115 break;
6116 case 2:
02da0b2d
PM
6117 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6118 tmp, tmp2);
0322b26e
PM
6119 break;
6120 default:
cc13115b 6121 abort();
ad69471c
PB
6122 }
6123 break;
0322b26e 6124 case 7: /* VQSHL */
02da0b2d 6125 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6126 break;
ad69471c 6127 }
7d1b0095 6128 tcg_temp_free_i32(tmp2);
ad69471c
PB
6129
6130 if (op == 1 || op == 3) {
6131 /* Accumulate. */
dd8fbd78 6132 tmp2 = neon_load_reg(rd, pass);
5371cb81 6133 gen_neon_add(size, tmp, tmp2);
7d1b0095 6134 tcg_temp_free_i32(tmp2);
ad69471c
PB
6135 } else if (op == 4 || (op == 5 && u)) {
6136 /* Insert */
6137 switch (size) {
6138 case 0:
6139 if (op == 4)
ca9a32e4 6140 mask = 0xff >> -shift;
ad69471c 6141 else
ca9a32e4
JR
6142 mask = (uint8_t)(0xff << shift);
6143 mask |= mask << 8;
6144 mask |= mask << 16;
ad69471c
PB
6145 break;
6146 case 1:
6147 if (op == 4)
ca9a32e4 6148 mask = 0xffff >> -shift;
ad69471c 6149 else
ca9a32e4
JR
6150 mask = (uint16_t)(0xffff << shift);
6151 mask |= mask << 16;
ad69471c
PB
6152 break;
6153 case 2:
ca9a32e4
JR
6154 if (shift < -31 || shift > 31) {
6155 mask = 0;
6156 } else {
6157 if (op == 4)
6158 mask = 0xffffffffu >> -shift;
6159 else
6160 mask = 0xffffffffu << shift;
6161 }
ad69471c
PB
6162 break;
6163 default:
6164 abort();
6165 }
dd8fbd78 6166 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6167 tcg_gen_andi_i32(tmp, tmp, mask);
6168 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6169 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6170 tcg_temp_free_i32(tmp2);
ad69471c 6171 }
dd8fbd78 6172 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6173 }
6174 } /* for pass */
6175 } else if (op < 10) {
ad69471c 6176 /* Shift by immediate and narrow:
9ee6e8bb 6177 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6178 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6179 if (rm & 1) {
6180 return 1;
6181 }
9ee6e8bb
PB
6182 shift = shift - (1 << (size + 3));
6183 size++;
92cdfaeb 6184 if (size == 3) {
a7812ae4 6185 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6186 neon_load_reg64(cpu_V0, rm);
6187 neon_load_reg64(cpu_V1, rm + 1);
6188 for (pass = 0; pass < 2; pass++) {
6189 TCGv_i64 in;
6190 if (pass == 0) {
6191 in = cpu_V0;
6192 } else {
6193 in = cpu_V1;
6194 }
ad69471c 6195 if (q) {
0b36f4cd 6196 if (input_unsigned) {
92cdfaeb 6197 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6198 } else {
92cdfaeb 6199 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6200 }
ad69471c 6201 } else {
0b36f4cd 6202 if (input_unsigned) {
92cdfaeb 6203 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6204 } else {
92cdfaeb 6205 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6206 }
ad69471c 6207 }
7d1b0095 6208 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6209 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6210 neon_store_reg(rd, pass, tmp);
6211 } /* for pass */
6212 tcg_temp_free_i64(tmp64);
6213 } else {
6214 if (size == 1) {
6215 imm = (uint16_t)shift;
6216 imm |= imm << 16;
2c0262af 6217 } else {
92cdfaeb
PM
6218 /* size == 2 */
6219 imm = (uint32_t)shift;
6220 }
6221 tmp2 = tcg_const_i32(imm);
6222 tmp4 = neon_load_reg(rm + 1, 0);
6223 tmp5 = neon_load_reg(rm + 1, 1);
6224 for (pass = 0; pass < 2; pass++) {
6225 if (pass == 0) {
6226 tmp = neon_load_reg(rm, 0);
6227 } else {
6228 tmp = tmp4;
6229 }
0b36f4cd
CL
6230 gen_neon_shift_narrow(size, tmp, tmp2, q,
6231 input_unsigned);
92cdfaeb
PM
6232 if (pass == 0) {
6233 tmp3 = neon_load_reg(rm, 1);
6234 } else {
6235 tmp3 = tmp5;
6236 }
0b36f4cd
CL
6237 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6238 input_unsigned);
36aa55dc 6239 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6240 tcg_temp_free_i32(tmp);
6241 tcg_temp_free_i32(tmp3);
6242 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6243 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6244 neon_store_reg(rd, pass, tmp);
6245 } /* for pass */
c6067f04 6246 tcg_temp_free_i32(tmp2);
b75263d6 6247 }
9ee6e8bb 6248 } else if (op == 10) {
cc13115b
PM
6249 /* VSHLL, VMOVL */
6250 if (q || (rd & 1)) {
9ee6e8bb 6251 return 1;
cc13115b 6252 }
ad69471c
PB
6253 tmp = neon_load_reg(rm, 0);
6254 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6255 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6256 if (pass == 1)
6257 tmp = tmp2;
6258
6259 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6260
9ee6e8bb
PB
6261 if (shift != 0) {
6262 /* The shift is less than the width of the source
ad69471c
PB
6263 type, so we can just shift the whole register. */
6264 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6265 /* Widen the result of shift: we need to clear
6266 * the potential overflow bits resulting from
6267 * left bits of the narrow input appearing as
6268 * right bits of left the neighbour narrow
6269 * input. */
ad69471c
PB
6270 if (size < 2 || !u) {
6271 uint64_t imm64;
6272 if (size == 0) {
6273 imm = (0xffu >> (8 - shift));
6274 imm |= imm << 16;
acdf01ef 6275 } else if (size == 1) {
ad69471c 6276 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6277 } else {
6278 /* size == 2 */
6279 imm = 0xffffffff >> (32 - shift);
6280 }
6281 if (size < 2) {
6282 imm64 = imm | (((uint64_t)imm) << 32);
6283 } else {
6284 imm64 = imm;
9ee6e8bb 6285 }
acdf01ef 6286 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6287 }
6288 }
ad69471c 6289 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6290 }
f73534a5 6291 } else if (op >= 14) {
9ee6e8bb 6292 /* VCVT fixed-point. */
cc13115b
PM
6293 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6294 return 1;
6295 }
f73534a5
PM
6296 /* We have already masked out the must-be-1 top bit of imm6,
6297 * hence this 32-shift where the ARM ARM has 64-imm6.
6298 */
6299 shift = 32 - shift;
9ee6e8bb 6300 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6301 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6302 if (!(op & 1)) {
9ee6e8bb 6303 if (u)
5500b06c 6304 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6305 else
5500b06c 6306 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6307 } else {
6308 if (u)
5500b06c 6309 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6310 else
5500b06c 6311 gen_vfp_tosl(0, shift, 1);
2c0262af 6312 }
4373f3ce 6313 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6314 }
6315 } else {
9ee6e8bb
PB
6316 return 1;
6317 }
6318 } else { /* (insn & 0x00380080) == 0 */
6319 int invert;
7d80fee5
PM
6320 if (q && (rd & 1)) {
6321 return 1;
6322 }
9ee6e8bb
PB
6323
6324 op = (insn >> 8) & 0xf;
6325 /* One register and immediate. */
6326 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6327 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6328 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6329 * We choose to not special-case this and will behave as if a
6330 * valid constant encoding of 0 had been given.
6331 */
9ee6e8bb
PB
6332 switch (op) {
6333 case 0: case 1:
6334 /* no-op */
6335 break;
6336 case 2: case 3:
6337 imm <<= 8;
6338 break;
6339 case 4: case 5:
6340 imm <<= 16;
6341 break;
6342 case 6: case 7:
6343 imm <<= 24;
6344 break;
6345 case 8: case 9:
6346 imm |= imm << 16;
6347 break;
6348 case 10: case 11:
6349 imm = (imm << 8) | (imm << 24);
6350 break;
6351 case 12:
8e31209e 6352 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6353 break;
6354 case 13:
6355 imm = (imm << 16) | 0xffff;
6356 break;
6357 case 14:
6358 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6359 if (invert)
6360 imm = ~imm;
6361 break;
6362 case 15:
7d80fee5
PM
6363 if (invert) {
6364 return 1;
6365 }
9ee6e8bb
PB
6366 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6367 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6368 break;
6369 }
6370 if (invert)
6371 imm = ~imm;
6372
9ee6e8bb
PB
6373 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6374 if (op & 1 && op < 12) {
ad69471c 6375 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6376 if (invert) {
6377 /* The immediate value has already been inverted, so
6378 BIC becomes AND. */
ad69471c 6379 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6380 } else {
ad69471c 6381 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6382 }
9ee6e8bb 6383 } else {
ad69471c 6384 /* VMOV, VMVN. */
7d1b0095 6385 tmp = tcg_temp_new_i32();
9ee6e8bb 6386 if (op == 14 && invert) {
a5a14945 6387 int n;
ad69471c
PB
6388 uint32_t val;
6389 val = 0;
9ee6e8bb
PB
6390 for (n = 0; n < 4; n++) {
6391 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6392 val |= 0xff << (n * 8);
9ee6e8bb 6393 }
ad69471c
PB
6394 tcg_gen_movi_i32(tmp, val);
6395 } else {
6396 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6397 }
9ee6e8bb 6398 }
ad69471c 6399 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6400 }
6401 }
e4b3861d 6402 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6403 if (size != 3) {
6404 op = (insn >> 8) & 0xf;
6405 if ((insn & (1 << 6)) == 0) {
6406 /* Three registers of different lengths. */
6407 int src1_wide;
6408 int src2_wide;
6409 int prewiden;
526d0096
PM
6410 /* undefreq: bit 0 : UNDEF if size == 0
6411 * bit 1 : UNDEF if size == 1
6412 * bit 2 : UNDEF if size == 2
6413 * bit 3 : UNDEF if U == 1
6414 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6415 */
6416 int undefreq;
6417 /* prewiden, src1_wide, src2_wide, undefreq */
6418 static const int neon_3reg_wide[16][4] = {
6419 {1, 0, 0, 0}, /* VADDL */
6420 {1, 1, 0, 0}, /* VADDW */
6421 {1, 0, 0, 0}, /* VSUBL */
6422 {1, 1, 0, 0}, /* VSUBW */
6423 {0, 1, 1, 0}, /* VADDHN */
6424 {0, 0, 0, 0}, /* VABAL */
6425 {0, 1, 1, 0}, /* VSUBHN */
6426 {0, 0, 0, 0}, /* VABDL */
6427 {0, 0, 0, 0}, /* VMLAL */
526d0096 6428 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6429 {0, 0, 0, 0}, /* VMLSL */
526d0096 6430 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6431 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6432 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6433 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6434 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6435 };
6436
6437 prewiden = neon_3reg_wide[op][0];
6438 src1_wide = neon_3reg_wide[op][1];
6439 src2_wide = neon_3reg_wide[op][2];
695272dc 6440 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6441
526d0096
PM
6442 if ((undefreq & (1 << size)) ||
6443 ((undefreq & 8) && u)) {
695272dc
PM
6444 return 1;
6445 }
6446 if ((src1_wide && (rn & 1)) ||
6447 (src2_wide && (rm & 1)) ||
6448 (!src2_wide && (rd & 1))) {
ad69471c 6449 return 1;
695272dc 6450 }
ad69471c 6451
4e624eda
PM
6452 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6453 * outside the loop below as it only performs a single pass.
6454 */
6455 if (op == 14 && size == 2) {
6456 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6457
d614a513 6458 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6459 return 1;
6460 }
6461 tcg_rn = tcg_temp_new_i64();
6462 tcg_rm = tcg_temp_new_i64();
6463 tcg_rd = tcg_temp_new_i64();
6464 neon_load_reg64(tcg_rn, rn);
6465 neon_load_reg64(tcg_rm, rm);
6466 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6467 neon_store_reg64(tcg_rd, rd);
6468 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6469 neon_store_reg64(tcg_rd, rd + 1);
6470 tcg_temp_free_i64(tcg_rn);
6471 tcg_temp_free_i64(tcg_rm);
6472 tcg_temp_free_i64(tcg_rd);
6473 return 0;
6474 }
6475
9ee6e8bb
PB
6476 /* Avoid overlapping operands. Wide source operands are
6477 always aligned so will never overlap with wide
6478 destinations in problematic ways. */
8f8e3aa4 6479 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6480 tmp = neon_load_reg(rm, 1);
6481 neon_store_scratch(2, tmp);
8f8e3aa4 6482 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6483 tmp = neon_load_reg(rn, 1);
6484 neon_store_scratch(2, tmp);
9ee6e8bb 6485 }
39d5492a 6486 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6487 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6488 if (src1_wide) {
6489 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6490 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6491 } else {
ad69471c 6492 if (pass == 1 && rd == rn) {
dd8fbd78 6493 tmp = neon_load_scratch(2);
9ee6e8bb 6494 } else {
ad69471c
PB
6495 tmp = neon_load_reg(rn, pass);
6496 }
6497 if (prewiden) {
6498 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6499 }
6500 }
ad69471c
PB
6501 if (src2_wide) {
6502 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6503 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6504 } else {
ad69471c 6505 if (pass == 1 && rd == rm) {
dd8fbd78 6506 tmp2 = neon_load_scratch(2);
9ee6e8bb 6507 } else {
ad69471c
PB
6508 tmp2 = neon_load_reg(rm, pass);
6509 }
6510 if (prewiden) {
6511 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6512 }
9ee6e8bb
PB
6513 }
6514 switch (op) {
6515 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6516 gen_neon_addl(size);
9ee6e8bb 6517 break;
79b0e534 6518 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6519 gen_neon_subl(size);
9ee6e8bb
PB
6520 break;
6521 case 5: case 7: /* VABAL, VABDL */
6522 switch ((size << 1) | u) {
ad69471c
PB
6523 case 0:
6524 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6525 break;
6526 case 1:
6527 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6528 break;
6529 case 2:
6530 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6531 break;
6532 case 3:
6533 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6534 break;
6535 case 4:
6536 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6537 break;
6538 case 5:
6539 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6540 break;
9ee6e8bb
PB
6541 default: abort();
6542 }
7d1b0095
PM
6543 tcg_temp_free_i32(tmp2);
6544 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6545 break;
6546 case 8: case 9: case 10: case 11: case 12: case 13:
6547 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6548 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6549 break;
6550 case 14: /* Polynomial VMULL */
e5ca24cb 6551 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6552 tcg_temp_free_i32(tmp2);
6553 tcg_temp_free_i32(tmp);
e5ca24cb 6554 break;
695272dc
PM
6555 default: /* 15 is RESERVED: caught earlier */
6556 abort();
9ee6e8bb 6557 }
ebcd88ce
PM
6558 if (op == 13) {
6559 /* VQDMULL */
6560 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6561 neon_store_reg64(cpu_V0, rd + pass);
6562 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6563 /* Accumulate. */
ebcd88ce 6564 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6565 switch (op) {
4dc064e6
PM
6566 case 10: /* VMLSL */
6567 gen_neon_negl(cpu_V0, size);
6568 /* Fall through */
6569 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6570 gen_neon_addl(size);
9ee6e8bb
PB
6571 break;
6572 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6573 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6574 if (op == 11) {
6575 gen_neon_negl(cpu_V0, size);
6576 }
ad69471c
PB
6577 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6578 break;
9ee6e8bb
PB
6579 default:
6580 abort();
6581 }
ad69471c 6582 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6583 } else if (op == 4 || op == 6) {
6584 /* Narrowing operation. */
7d1b0095 6585 tmp = tcg_temp_new_i32();
79b0e534 6586 if (!u) {
9ee6e8bb 6587 switch (size) {
ad69471c
PB
6588 case 0:
6589 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6590 break;
6591 case 1:
6592 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6593 break;
6594 case 2:
6595 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6596 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6597 break;
9ee6e8bb
PB
6598 default: abort();
6599 }
6600 } else {
6601 switch (size) {
ad69471c
PB
6602 case 0:
6603 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6604 break;
6605 case 1:
6606 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6607 break;
6608 case 2:
6609 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6610 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6611 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6612 break;
9ee6e8bb
PB
6613 default: abort();
6614 }
6615 }
ad69471c
PB
6616 if (pass == 0) {
6617 tmp3 = tmp;
6618 } else {
6619 neon_store_reg(rd, 0, tmp3);
6620 neon_store_reg(rd, 1, tmp);
6621 }
9ee6e8bb
PB
6622 } else {
6623 /* Write back the result. */
ad69471c 6624 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6625 }
6626 }
6627 } else {
3e3326df
PM
6628 /* Two registers and a scalar. NB that for ops of this form
6629 * the ARM ARM labels bit 24 as Q, but it is in our variable
6630 * 'u', not 'q'.
6631 */
6632 if (size == 0) {
6633 return 1;
6634 }
9ee6e8bb 6635 switch (op) {
9ee6e8bb 6636 case 1: /* Float VMLA scalar */
9ee6e8bb 6637 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6638 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6639 if (size == 1) {
6640 return 1;
6641 }
6642 /* fall through */
6643 case 0: /* Integer VMLA scalar */
6644 case 4: /* Integer VMLS scalar */
6645 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6646 case 12: /* VQDMULH scalar */
6647 case 13: /* VQRDMULH scalar */
3e3326df
PM
6648 if (u && ((rd | rn) & 1)) {
6649 return 1;
6650 }
dd8fbd78
FN
6651 tmp = neon_get_scalar(size, rm);
6652 neon_store_scratch(0, tmp);
9ee6e8bb 6653 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6654 tmp = neon_load_scratch(0);
6655 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6656 if (op == 12) {
6657 if (size == 1) {
02da0b2d 6658 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6659 } else {
02da0b2d 6660 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6661 }
6662 } else if (op == 13) {
6663 if (size == 1) {
02da0b2d 6664 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6665 } else {
02da0b2d 6666 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6667 }
6668 } else if (op & 1) {
aa47cfdd
PM
6669 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6670 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6671 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6672 } else {
6673 switch (size) {
dd8fbd78
FN
6674 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6675 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6676 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6677 default: abort();
9ee6e8bb
PB
6678 }
6679 }
7d1b0095 6680 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6681 if (op < 8) {
6682 /* Accumulate. */
dd8fbd78 6683 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6684 switch (op) {
6685 case 0:
dd8fbd78 6686 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6687 break;
6688 case 1:
aa47cfdd
PM
6689 {
6690 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6691 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6692 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6693 break;
aa47cfdd 6694 }
9ee6e8bb 6695 case 4:
dd8fbd78 6696 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6697 break;
6698 case 5:
aa47cfdd
PM
6699 {
6700 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6701 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6702 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6703 break;
aa47cfdd 6704 }
9ee6e8bb
PB
6705 default:
6706 abort();
6707 }
7d1b0095 6708 tcg_temp_free_i32(tmp2);
9ee6e8bb 6709 }
dd8fbd78 6710 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6711 }
6712 break;
9ee6e8bb 6713 case 3: /* VQDMLAL scalar */
9ee6e8bb 6714 case 7: /* VQDMLSL scalar */
9ee6e8bb 6715 case 11: /* VQDMULL scalar */
3e3326df 6716 if (u == 1) {
ad69471c 6717 return 1;
3e3326df
PM
6718 }
6719 /* fall through */
6720 case 2: /* VMLAL sclar */
6721 case 6: /* VMLSL scalar */
6722 case 10: /* VMULL scalar */
6723 if (rd & 1) {
6724 return 1;
6725 }
dd8fbd78 6726 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6727 /* We need a copy of tmp2 because gen_neon_mull
6728 * deletes it during pass 0. */
7d1b0095 6729 tmp4 = tcg_temp_new_i32();
c6067f04 6730 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6731 tmp3 = neon_load_reg(rn, 1);
ad69471c 6732
9ee6e8bb 6733 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6734 if (pass == 0) {
6735 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6736 } else {
dd8fbd78 6737 tmp = tmp3;
c6067f04 6738 tmp2 = tmp4;
9ee6e8bb 6739 }
ad69471c 6740 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6741 if (op != 11) {
6742 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6743 }
9ee6e8bb 6744 switch (op) {
4dc064e6
PM
6745 case 6:
6746 gen_neon_negl(cpu_V0, size);
6747 /* Fall through */
6748 case 2:
ad69471c 6749 gen_neon_addl(size);
9ee6e8bb
PB
6750 break;
6751 case 3: case 7:
ad69471c 6752 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6753 if (op == 7) {
6754 gen_neon_negl(cpu_V0, size);
6755 }
ad69471c 6756 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6757 break;
6758 case 10:
6759 /* no-op */
6760 break;
6761 case 11:
ad69471c 6762 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6763 break;
6764 default:
6765 abort();
6766 }
ad69471c 6767 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6768 }
dd8fbd78 6769
dd8fbd78 6770
9ee6e8bb
PB
6771 break;
6772 default: /* 14 and 15 are RESERVED */
6773 return 1;
6774 }
6775 }
6776 } else { /* size == 3 */
6777 if (!u) {
6778 /* Extract. */
9ee6e8bb 6779 imm = (insn >> 8) & 0xf;
ad69471c
PB
6780
6781 if (imm > 7 && !q)
6782 return 1;
6783
52579ea1
PM
6784 if (q && ((rd | rn | rm) & 1)) {
6785 return 1;
6786 }
6787
ad69471c
PB
6788 if (imm == 0) {
6789 neon_load_reg64(cpu_V0, rn);
6790 if (q) {
6791 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6792 }
ad69471c
PB
6793 } else if (imm == 8) {
6794 neon_load_reg64(cpu_V0, rn + 1);
6795 if (q) {
6796 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6797 }
ad69471c 6798 } else if (q) {
a7812ae4 6799 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6800 if (imm < 8) {
6801 neon_load_reg64(cpu_V0, rn);
a7812ae4 6802 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6803 } else {
6804 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6805 neon_load_reg64(tmp64, rm);
ad69471c
PB
6806 }
6807 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6808 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6809 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6810 if (imm < 8) {
6811 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6812 } else {
ad69471c
PB
6813 neon_load_reg64(cpu_V1, rm + 1);
6814 imm -= 8;
9ee6e8bb 6815 }
ad69471c 6816 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6817 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6818 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6819 tcg_temp_free_i64(tmp64);
ad69471c 6820 } else {
a7812ae4 6821 /* BUGFIX */
ad69471c 6822 neon_load_reg64(cpu_V0, rn);
a7812ae4 6823 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6824 neon_load_reg64(cpu_V1, rm);
a7812ae4 6825 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6826 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6827 }
6828 neon_store_reg64(cpu_V0, rd);
6829 if (q) {
6830 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6831 }
6832 } else if ((insn & (1 << 11)) == 0) {
6833 /* Two register misc. */
6834 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6835 size = (insn >> 18) & 3;
600b828c
PM
6836 /* UNDEF for unknown op values and bad op-size combinations */
6837 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6838 return 1;
6839 }
fe8fcf3d
PM
6840 if (neon_2rm_is_v8_op(op) &&
6841 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6842 return 1;
6843 }
fc2a9b37
PM
6844 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6845 q && ((rm | rd) & 1)) {
6846 return 1;
6847 }
9ee6e8bb 6848 switch (op) {
600b828c 6849 case NEON_2RM_VREV64:
9ee6e8bb 6850 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6851 tmp = neon_load_reg(rm, pass * 2);
6852 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6853 switch (size) {
dd8fbd78
FN
6854 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6855 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6856 case 2: /* no-op */ break;
6857 default: abort();
6858 }
dd8fbd78 6859 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6860 if (size == 2) {
dd8fbd78 6861 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6862 } else {
9ee6e8bb 6863 switch (size) {
dd8fbd78
FN
6864 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6865 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6866 default: abort();
6867 }
dd8fbd78 6868 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6869 }
6870 }
6871 break;
600b828c
PM
6872 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6873 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6874 for (pass = 0; pass < q + 1; pass++) {
6875 tmp = neon_load_reg(rm, pass * 2);
6876 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6877 tmp = neon_load_reg(rm, pass * 2 + 1);
6878 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6879 switch (size) {
6880 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6881 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6882 case 2: tcg_gen_add_i64(CPU_V001); break;
6883 default: abort();
6884 }
600b828c 6885 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6886 /* Accumulate. */
ad69471c
PB
6887 neon_load_reg64(cpu_V1, rd + pass);
6888 gen_neon_addl(size);
9ee6e8bb 6889 }
ad69471c 6890 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6891 }
6892 break;
600b828c 6893 case NEON_2RM_VTRN:
9ee6e8bb 6894 if (size == 2) {
a5a14945 6895 int n;
9ee6e8bb 6896 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6897 tmp = neon_load_reg(rm, n);
6898 tmp2 = neon_load_reg(rd, n + 1);
6899 neon_store_reg(rm, n, tmp2);
6900 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6901 }
6902 } else {
6903 goto elementwise;
6904 }
6905 break;
600b828c 6906 case NEON_2RM_VUZP:
02acedf9 6907 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6908 return 1;
9ee6e8bb
PB
6909 }
6910 break;
600b828c 6911 case NEON_2RM_VZIP:
d68a6f3a 6912 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6913 return 1;
9ee6e8bb
PB
6914 }
6915 break;
600b828c
PM
6916 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6917 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6918 if (rm & 1) {
6919 return 1;
6920 }
39d5492a 6921 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6922 for (pass = 0; pass < 2; pass++) {
ad69471c 6923 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6924 tmp = tcg_temp_new_i32();
600b828c
PM
6925 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6926 tmp, cpu_V0);
ad69471c
PB
6927 if (pass == 0) {
6928 tmp2 = tmp;
6929 } else {
6930 neon_store_reg(rd, 0, tmp2);
6931 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6932 }
9ee6e8bb
PB
6933 }
6934 break;
600b828c 6935 case NEON_2RM_VSHLL:
fc2a9b37 6936 if (q || (rd & 1)) {
9ee6e8bb 6937 return 1;
600b828c 6938 }
ad69471c
PB
6939 tmp = neon_load_reg(rm, 0);
6940 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6941 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6942 if (pass == 1)
6943 tmp = tmp2;
6944 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6945 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6946 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6947 }
6948 break;
600b828c 6949 case NEON_2RM_VCVT_F16_F32:
d614a513 6950 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6951 q || (rm & 1)) {
6952 return 1;
6953 }
7d1b0095
PM
6954 tmp = tcg_temp_new_i32();
6955 tmp2 = tcg_temp_new_i32();
60011498 6956 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6957 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6958 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6959 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6960 tcg_gen_shli_i32(tmp2, tmp2, 16);
6961 tcg_gen_or_i32(tmp2, tmp2, tmp);
6962 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6963 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6964 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6965 neon_store_reg(rd, 0, tmp2);
7d1b0095 6966 tmp2 = tcg_temp_new_i32();
2d981da7 6967 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6968 tcg_gen_shli_i32(tmp2, tmp2, 16);
6969 tcg_gen_or_i32(tmp2, tmp2, tmp);
6970 neon_store_reg(rd, 1, tmp2);
7d1b0095 6971 tcg_temp_free_i32(tmp);
60011498 6972 break;
600b828c 6973 case NEON_2RM_VCVT_F32_F16:
d614a513 6974 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6975 q || (rd & 1)) {
6976 return 1;
6977 }
7d1b0095 6978 tmp3 = tcg_temp_new_i32();
60011498
PB
6979 tmp = neon_load_reg(rm, 0);
6980 tmp2 = neon_load_reg(rm, 1);
6981 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6982 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6983 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6984 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6985 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6986 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6987 tcg_temp_free_i32(tmp);
60011498 6988 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6989 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6990 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6991 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6992 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6993 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6994 tcg_temp_free_i32(tmp2);
6995 tcg_temp_free_i32(tmp3);
60011498 6996 break;
9d935509 6997 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6998 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6999 || ((rm | rd) & 1)) {
7000 return 1;
7001 }
7002 tmp = tcg_const_i32(rd);
7003 tmp2 = tcg_const_i32(rm);
7004
7005 /* Bit 6 is the lowest opcode bit; it distinguishes between
7006 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7007 */
7008 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7009
7010 if (op == NEON_2RM_AESE) {
7011 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7012 } else {
7013 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7014 }
7015 tcg_temp_free_i32(tmp);
7016 tcg_temp_free_i32(tmp2);
7017 tcg_temp_free_i32(tmp3);
7018 break;
f1ecb913 7019 case NEON_2RM_SHA1H:
d614a513 7020 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7021 || ((rm | rd) & 1)) {
7022 return 1;
7023 }
7024 tmp = tcg_const_i32(rd);
7025 tmp2 = tcg_const_i32(rm);
7026
7027 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7028
7029 tcg_temp_free_i32(tmp);
7030 tcg_temp_free_i32(tmp2);
7031 break;
7032 case NEON_2RM_SHA1SU1:
7033 if ((rm | rd) & 1) {
7034 return 1;
7035 }
7036 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7037 if (q) {
d614a513 7038 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7039 return 1;
7040 }
d614a513 7041 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7042 return 1;
7043 }
7044 tmp = tcg_const_i32(rd);
7045 tmp2 = tcg_const_i32(rm);
7046 if (q) {
7047 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7048 } else {
7049 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7050 }
7051 tcg_temp_free_i32(tmp);
7052 tcg_temp_free_i32(tmp2);
7053 break;
9ee6e8bb
PB
7054 default:
7055 elementwise:
7056 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7057 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7058 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7059 neon_reg_offset(rm, pass));
39d5492a 7060 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7061 } else {
dd8fbd78 7062 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7063 }
7064 switch (op) {
600b828c 7065 case NEON_2RM_VREV32:
9ee6e8bb 7066 switch (size) {
dd8fbd78
FN
7067 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7068 case 1: gen_swap_half(tmp); break;
600b828c 7069 default: abort();
9ee6e8bb
PB
7070 }
7071 break;
600b828c 7072 case NEON_2RM_VREV16:
dd8fbd78 7073 gen_rev16(tmp);
9ee6e8bb 7074 break;
600b828c 7075 case NEON_2RM_VCLS:
9ee6e8bb 7076 switch (size) {
dd8fbd78
FN
7077 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7078 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7079 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7080 default: abort();
9ee6e8bb
PB
7081 }
7082 break;
600b828c 7083 case NEON_2RM_VCLZ:
9ee6e8bb 7084 switch (size) {
dd8fbd78
FN
7085 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7086 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7087 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 7088 default: abort();
9ee6e8bb
PB
7089 }
7090 break;
600b828c 7091 case NEON_2RM_VCNT:
dd8fbd78 7092 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7093 break;
600b828c 7094 case NEON_2RM_VMVN:
dd8fbd78 7095 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7096 break;
600b828c 7097 case NEON_2RM_VQABS:
9ee6e8bb 7098 switch (size) {
02da0b2d
PM
7099 case 0:
7100 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7101 break;
7102 case 1:
7103 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7104 break;
7105 case 2:
7106 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7107 break;
600b828c 7108 default: abort();
9ee6e8bb
PB
7109 }
7110 break;
600b828c 7111 case NEON_2RM_VQNEG:
9ee6e8bb 7112 switch (size) {
02da0b2d
PM
7113 case 0:
7114 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7115 break;
7116 case 1:
7117 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7118 break;
7119 case 2:
7120 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7121 break;
600b828c 7122 default: abort();
9ee6e8bb
PB
7123 }
7124 break;
600b828c 7125 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7126 tmp2 = tcg_const_i32(0);
9ee6e8bb 7127 switch(size) {
dd8fbd78
FN
7128 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7129 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7130 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7131 default: abort();
9ee6e8bb 7132 }
39d5492a 7133 tcg_temp_free_i32(tmp2);
600b828c 7134 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7135 tcg_gen_not_i32(tmp, tmp);
600b828c 7136 }
9ee6e8bb 7137 break;
600b828c 7138 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7139 tmp2 = tcg_const_i32(0);
9ee6e8bb 7140 switch(size) {
dd8fbd78
FN
7141 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7142 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7143 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7144 default: abort();
9ee6e8bb 7145 }
39d5492a 7146 tcg_temp_free_i32(tmp2);
600b828c 7147 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7148 tcg_gen_not_i32(tmp, tmp);
600b828c 7149 }
9ee6e8bb 7150 break;
600b828c 7151 case NEON_2RM_VCEQ0:
dd8fbd78 7152 tmp2 = tcg_const_i32(0);
9ee6e8bb 7153 switch(size) {
dd8fbd78
FN
7154 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7155 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7156 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7157 default: abort();
9ee6e8bb 7158 }
39d5492a 7159 tcg_temp_free_i32(tmp2);
9ee6e8bb 7160 break;
600b828c 7161 case NEON_2RM_VABS:
9ee6e8bb 7162 switch(size) {
dd8fbd78
FN
7163 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7164 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7165 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7166 default: abort();
9ee6e8bb
PB
7167 }
7168 break;
600b828c 7169 case NEON_2RM_VNEG:
dd8fbd78
FN
7170 tmp2 = tcg_const_i32(0);
7171 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7172 tcg_temp_free_i32(tmp2);
9ee6e8bb 7173 break;
600b828c 7174 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7175 {
7176 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7177 tmp2 = tcg_const_i32(0);
aa47cfdd 7178 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7179 tcg_temp_free_i32(tmp2);
aa47cfdd 7180 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7181 break;
aa47cfdd 7182 }
600b828c 7183 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7184 {
7185 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7186 tmp2 = tcg_const_i32(0);
aa47cfdd 7187 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7188 tcg_temp_free_i32(tmp2);
aa47cfdd 7189 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7190 break;
aa47cfdd 7191 }
600b828c 7192 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7193 {
7194 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7195 tmp2 = tcg_const_i32(0);
aa47cfdd 7196 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7197 tcg_temp_free_i32(tmp2);
aa47cfdd 7198 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7199 break;
aa47cfdd 7200 }
600b828c 7201 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7202 {
7203 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7204 tmp2 = tcg_const_i32(0);
aa47cfdd 7205 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7206 tcg_temp_free_i32(tmp2);
aa47cfdd 7207 tcg_temp_free_ptr(fpstatus);
0e326109 7208 break;
aa47cfdd 7209 }
600b828c 7210 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7211 {
7212 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7213 tmp2 = tcg_const_i32(0);
aa47cfdd 7214 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7215 tcg_temp_free_i32(tmp2);
aa47cfdd 7216 tcg_temp_free_ptr(fpstatus);
0e326109 7217 break;
aa47cfdd 7218 }
600b828c 7219 case NEON_2RM_VABS_F:
4373f3ce 7220 gen_vfp_abs(0);
9ee6e8bb 7221 break;
600b828c 7222 case NEON_2RM_VNEG_F:
4373f3ce 7223 gen_vfp_neg(0);
9ee6e8bb 7224 break;
600b828c 7225 case NEON_2RM_VSWP:
dd8fbd78
FN
7226 tmp2 = neon_load_reg(rd, pass);
7227 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7228 break;
600b828c 7229 case NEON_2RM_VTRN:
dd8fbd78 7230 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7231 switch (size) {
dd8fbd78
FN
7232 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7233 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7234 default: abort();
9ee6e8bb 7235 }
dd8fbd78 7236 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7237 break;
34f7b0a2
WN
7238 case NEON_2RM_VRINTN:
7239 case NEON_2RM_VRINTA:
7240 case NEON_2RM_VRINTM:
7241 case NEON_2RM_VRINTP:
7242 case NEON_2RM_VRINTZ:
7243 {
7244 TCGv_i32 tcg_rmode;
7245 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7246 int rmode;
7247
7248 if (op == NEON_2RM_VRINTZ) {
7249 rmode = FPROUNDING_ZERO;
7250 } else {
7251 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7252 }
7253
7254 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7255 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7256 cpu_env);
7257 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7258 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7259 cpu_env);
7260 tcg_temp_free_ptr(fpstatus);
7261 tcg_temp_free_i32(tcg_rmode);
7262 break;
7263 }
2ce70625
WN
7264 case NEON_2RM_VRINTX:
7265 {
7266 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7267 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7268 tcg_temp_free_ptr(fpstatus);
7269 break;
7270 }
901ad525
WN
7271 case NEON_2RM_VCVTAU:
7272 case NEON_2RM_VCVTAS:
7273 case NEON_2RM_VCVTNU:
7274 case NEON_2RM_VCVTNS:
7275 case NEON_2RM_VCVTPU:
7276 case NEON_2RM_VCVTPS:
7277 case NEON_2RM_VCVTMU:
7278 case NEON_2RM_VCVTMS:
7279 {
7280 bool is_signed = !extract32(insn, 7, 1);
7281 TCGv_ptr fpst = get_fpstatus_ptr(1);
7282 TCGv_i32 tcg_rmode, tcg_shift;
7283 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7284
7285 tcg_shift = tcg_const_i32(0);
7286 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7287 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7288 cpu_env);
7289
7290 if (is_signed) {
7291 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7292 tcg_shift, fpst);
7293 } else {
7294 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7295 tcg_shift, fpst);
7296 }
7297
7298 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7299 cpu_env);
7300 tcg_temp_free_i32(tcg_rmode);
7301 tcg_temp_free_i32(tcg_shift);
7302 tcg_temp_free_ptr(fpst);
7303 break;
7304 }
600b828c 7305 case NEON_2RM_VRECPE:
b6d4443a
AB
7306 {
7307 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7308 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7309 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7310 break;
b6d4443a 7311 }
600b828c 7312 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7313 {
7314 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7315 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7316 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7317 break;
c2fb418e 7318 }
600b828c 7319 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7320 {
7321 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7322 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7323 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7324 break;
b6d4443a 7325 }
600b828c 7326 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7327 {
7328 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7329 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7330 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7331 break;
c2fb418e 7332 }
600b828c 7333 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7334 gen_vfp_sito(0, 1);
9ee6e8bb 7335 break;
600b828c 7336 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7337 gen_vfp_uito(0, 1);
9ee6e8bb 7338 break;
600b828c 7339 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7340 gen_vfp_tosiz(0, 1);
9ee6e8bb 7341 break;
600b828c 7342 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7343 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7344 break;
7345 default:
600b828c
PM
7346 /* Reserved op values were caught by the
7347 * neon_2rm_sizes[] check earlier.
7348 */
7349 abort();
9ee6e8bb 7350 }
600b828c 7351 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7352 tcg_gen_st_f32(cpu_F0s, cpu_env,
7353 neon_reg_offset(rd, pass));
9ee6e8bb 7354 } else {
dd8fbd78 7355 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7356 }
7357 }
7358 break;
7359 }
7360 } else if ((insn & (1 << 10)) == 0) {
7361 /* VTBL, VTBX. */
56907d77
PM
7362 int n = ((insn >> 8) & 3) + 1;
7363 if ((rn + n) > 32) {
7364 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7365 * helper function running off the end of the register file.
7366 */
7367 return 1;
7368 }
7369 n <<= 3;
9ee6e8bb 7370 if (insn & (1 << 6)) {
8f8e3aa4 7371 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7372 } else {
7d1b0095 7373 tmp = tcg_temp_new_i32();
8f8e3aa4 7374 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7375 }
8f8e3aa4 7376 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7377 tmp4 = tcg_const_i32(rn);
7378 tmp5 = tcg_const_i32(n);
9ef39277 7379 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7380 tcg_temp_free_i32(tmp);
9ee6e8bb 7381 if (insn & (1 << 6)) {
8f8e3aa4 7382 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7383 } else {
7d1b0095 7384 tmp = tcg_temp_new_i32();
8f8e3aa4 7385 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7386 }
8f8e3aa4 7387 tmp3 = neon_load_reg(rm, 1);
9ef39277 7388 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7389 tcg_temp_free_i32(tmp5);
7390 tcg_temp_free_i32(tmp4);
8f8e3aa4 7391 neon_store_reg(rd, 0, tmp2);
3018f259 7392 neon_store_reg(rd, 1, tmp3);
7d1b0095 7393 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7394 } else if ((insn & 0x380) == 0) {
7395 /* VDUP */
133da6aa
JR
7396 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7397 return 1;
7398 }
9ee6e8bb 7399 if (insn & (1 << 19)) {
dd8fbd78 7400 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7401 } else {
dd8fbd78 7402 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7403 }
7404 if (insn & (1 << 16)) {
dd8fbd78 7405 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7406 } else if (insn & (1 << 17)) {
7407 if ((insn >> 18) & 1)
dd8fbd78 7408 gen_neon_dup_high16(tmp);
9ee6e8bb 7409 else
dd8fbd78 7410 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7411 }
7412 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7413 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7414 tcg_gen_mov_i32(tmp2, tmp);
7415 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7416 }
7d1b0095 7417 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7418 } else {
7419 return 1;
7420 }
7421 }
7422 }
7423 return 0;
7424}
7425
/* Decode and translate a coprocessor instruction (MRC/MCR/MRRC/MCRR and
 * the XScale/iwMMXt coprocessor spaces).  Returns 0 on success, 1 if the
 * instruction should UNDEF (unknown register, bad permissions, cdp, etc).
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates access to cp0/cp1 on XScale; bit clear -> UNDEF */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-coordinate fields; 64-bit (MRRC/MCRR) accesses
     * have no crn/opc2 and carry the second transfer register in rt2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions (translation-time check) */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec bits and PC so a runtime trap reports the
             * correct state; PC of this insn is s->pc - 4 (ARM mode).
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            /* WFI ends the TB; PC must point past this insn on resume */
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers must be bracketed by gen_io_start/gen_io_end
         * when icount is in use so the access is counted precisely.
         */
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into rt (low) and rt2 (high) */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7677
5e3f878a
PB
7678
7679/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7680static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7681{
39d5492a 7682 TCGv_i32 tmp;
7d1b0095 7683 tmp = tcg_temp_new_i32();
ecc7b3aa 7684 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7685 store_reg(s, rlow, tmp);
7d1b0095 7686 tmp = tcg_temp_new_i32();
5e3f878a 7687 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7688 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7689 store_reg(s, rhigh, tmp);
7690}
7691
7692/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7693static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7694{
a7812ae4 7695 TCGv_i64 tmp;
39d5492a 7696 TCGv_i32 tmp2;
5e3f878a 7697
36aa55dc 7698 /* Load value and extend to 64 bits. */
a7812ae4 7699 tmp = tcg_temp_new_i64();
5e3f878a
PB
7700 tmp2 = load_reg(s, rlow);
7701 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7702 tcg_temp_free_i32(tmp2);
5e3f878a 7703 tcg_gen_add_i64(val, val, tmp);
b75263d6 7704 tcg_temp_free_i64(tmp);
5e3f878a
PB
7705}
7706
7707/* load and add a 64-bit value from a register pair. */
a7812ae4 7708static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7709{
a7812ae4 7710 TCGv_i64 tmp;
39d5492a
PM
7711 TCGv_i32 tmpl;
7712 TCGv_i32 tmph;
5e3f878a
PB
7713
7714 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7715 tmpl = load_reg(s, rlow);
7716 tmph = load_reg(s, rhigh);
a7812ae4 7717 tmp = tcg_temp_new_i64();
36aa55dc 7718 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7719 tcg_temp_free_i32(tmpl);
7720 tcg_temp_free_i32(tmph);
5e3f878a 7721 tcg_gen_add_i64(val, val, tmp);
b75263d6 7722 tcg_temp_free_i64(tmp);
5e3f878a
PB
7723}
7724
/* Set N and Z flags from hi|lo.
 * N lives in bit 31 of cpu_NF, so the high word is copied there directly.
 * Z is "all 64 bits zero"; the flag representation stores a value that is
 * zero iff Z is set, which (lo | hi) provides.
 */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7731
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Record that this TB contains a load-exclusive (used by callers of
     * the translator, e.g. for single-step handling).
     */
    s->is_ldex = true;

    /* size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword (pair) */
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        /* LDREXD: load the second word from addr+4 and remember the
         * whole 64-bit value for the later STREXD comparison.
         */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the exclusive monitor with the accessed address */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7779
/* CLREX: clear the local exclusive monitor.  An address of -1 can never
 * match a subsequent store-exclusive address comparison, so this
 * invalidates any outstanding load-exclusive.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7784
#ifdef CONFIG_USER_ONLY
/* User-mode: store-exclusive cannot be done inline (another thread may
 * race); stash the operand encoding in cpu_exclusive_info and raise
 * EXCP_STREX so the cpu loop performs the operation atomically.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
/* System emulation: only one CPU runs at a time, so the compare-and-store
 * sequence below is effectively atomic.  Rd receives 0 on success, 1 on
 * failure, per the STREX definition.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* First check: does the address match the armed monitor? */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load the memory and compare against the value
     * captured by the load-exclusive.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the actual store(s) */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* STREXD: second register of the pair goes to addr+4 */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the monitor is disarmed afterwards */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
7879
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: stores LR and
 * SPSR of the current mode to the stack of the specified @mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3: target EL is 3 */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the target mode number */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the first (LR) store relative to the banked SP */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at addr+4 */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Final SP adjustment differs from the store-base offset because
         * addr has already been advanced by 4 for the SPSR store.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    /* CPU state (banked regs) may have changed; end the TB */
    s->is_jmp = DISAS_UPDATE;
}
8006
f4df2210 8007static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8008{
f4df2210 8009 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8010 TCGv_i32 tmp;
8011 TCGv_i32 tmp2;
8012 TCGv_i32 tmp3;
8013 TCGv_i32 addr;
a7812ae4 8014 TCGv_i64 tmp64;
9ee6e8bb 8015
9ee6e8bb 8016 /* M variants do not implement ARM mode. */
b53d8923 8017 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 8018 goto illegal_op;
b53d8923 8019 }
9ee6e8bb
PB
8020 cond = insn >> 28;
8021 if (cond == 0xf){
be5e7a76
DES
8022 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8023 * choose to UNDEF. In ARMv5 and above the space is used
8024 * for miscellaneous unconditional instructions.
8025 */
8026 ARCH(5);
8027
9ee6e8bb
PB
8028 /* Unconditional instructions. */
8029 if (((insn >> 25) & 7) == 1) {
8030 /* NEON Data processing. */
d614a513 8031 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8032 goto illegal_op;
d614a513 8033 }
9ee6e8bb 8034
7dcc1f89 8035 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8036 goto illegal_op;
7dcc1f89 8037 }
9ee6e8bb
PB
8038 return;
8039 }
8040 if ((insn & 0x0f100000) == 0x04000000) {
8041 /* NEON load/store. */
d614a513 8042 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8043 goto illegal_op;
d614a513 8044 }
9ee6e8bb 8045
7dcc1f89 8046 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8047 goto illegal_op;
7dcc1f89 8048 }
9ee6e8bb
PB
8049 return;
8050 }
6a57f3eb
WN
8051 if ((insn & 0x0f000e10) == 0x0e000a00) {
8052 /* VFP. */
7dcc1f89 8053 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8054 goto illegal_op;
8055 }
8056 return;
8057 }
3d185e5d
PM
8058 if (((insn & 0x0f30f000) == 0x0510f000) ||
8059 ((insn & 0x0f30f010) == 0x0710f000)) {
8060 if ((insn & (1 << 22)) == 0) {
8061 /* PLDW; v7MP */
d614a513 8062 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8063 goto illegal_op;
8064 }
8065 }
8066 /* Otherwise PLD; v5TE+ */
be5e7a76 8067 ARCH(5TE);
3d185e5d
PM
8068 return;
8069 }
8070 if (((insn & 0x0f70f000) == 0x0450f000) ||
8071 ((insn & 0x0f70f010) == 0x0650f000)) {
8072 ARCH(7);
8073 return; /* PLI; V7 */
8074 }
8075 if (((insn & 0x0f700000) == 0x04100000) ||
8076 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8077 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8078 goto illegal_op;
8079 }
8080 return; /* v7MP: Unallocated memory hint: must NOP */
8081 }
8082
8083 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8084 ARCH(6);
8085 /* setend */
9886ecdf
PB
8086 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8087 gen_helper_setend(cpu_env);
8088 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8089 }
8090 return;
8091 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8092 switch ((insn >> 4) & 0xf) {
8093 case 1: /* clrex */
8094 ARCH(6K);
426f5abc 8095 gen_clrex(s);
9ee6e8bb
PB
8096 return;
8097 case 4: /* dsb */
8098 case 5: /* dmb */
9ee6e8bb 8099 ARCH(7);
61e4c432 8100 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8101 return;
6df99dec
SS
8102 case 6: /* isb */
8103 /* We need to break the TB after this insn to execute
8104 * self-modifying code correctly and also to take
8105 * any pending interrupts immediately.
8106 */
8107 gen_lookup_tb(s);
8108 return;
9ee6e8bb
PB
8109 default:
8110 goto illegal_op;
8111 }
8112 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8113 /* srs */
81465888
PM
8114 ARCH(6);
8115 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8116 return;
ea825eee 8117 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8118 /* rfe */
c67b6b71 8119 int32_t offset;
9ee6e8bb
PB
8120 if (IS_USER(s))
8121 goto illegal_op;
8122 ARCH(6);
8123 rn = (insn >> 16) & 0xf;
b0109805 8124 addr = load_reg(s, rn);
9ee6e8bb
PB
8125 i = (insn >> 23) & 3;
8126 switch (i) {
b0109805 8127 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8128 case 1: offset = 0; break; /* IA */
8129 case 2: offset = -8; break; /* DB */
b0109805 8130 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8131 default: abort();
8132 }
8133 if (offset)
b0109805
PB
8134 tcg_gen_addi_i32(addr, addr, offset);
8135 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8136 tmp = tcg_temp_new_i32();
12dcc321 8137 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8138 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8139 tmp2 = tcg_temp_new_i32();
12dcc321 8140 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8141 if (insn & (1 << 21)) {
8142 /* Base writeback. */
8143 switch (i) {
b0109805 8144 case 0: offset = -8; break;
c67b6b71
FN
8145 case 1: offset = 4; break;
8146 case 2: offset = -4; break;
b0109805 8147 case 3: offset = 0; break;
9ee6e8bb
PB
8148 default: abort();
8149 }
8150 if (offset)
b0109805
PB
8151 tcg_gen_addi_i32(addr, addr, offset);
8152 store_reg(s, rn, addr);
8153 } else {
7d1b0095 8154 tcg_temp_free_i32(addr);
9ee6e8bb 8155 }
b0109805 8156 gen_rfe(s, tmp, tmp2);
c67b6b71 8157 return;
9ee6e8bb
PB
8158 } else if ((insn & 0x0e000000) == 0x0a000000) {
8159 /* branch link and change to thumb (blx <offset>) */
8160 int32_t offset;
8161
8162 val = (uint32_t)s->pc;
7d1b0095 8163 tmp = tcg_temp_new_i32();
d9ba4830
PB
8164 tcg_gen_movi_i32(tmp, val);
8165 store_reg(s, 14, tmp);
9ee6e8bb
PB
8166 /* Sign-extend the 24-bit offset */
8167 offset = (((int32_t)insn) << 8) >> 8;
8168 /* offset * 4 + bit24 * 2 + (thumb bit) */
8169 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8170 /* pipeline offset */
8171 val += 4;
be5e7a76 8172 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8173 gen_bx_im(s, val);
9ee6e8bb
PB
8174 return;
8175 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8176 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8177 /* iWMMXt register transfer. */
c0f4af17 8178 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8179 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8180 return;
c0f4af17
PM
8181 }
8182 }
9ee6e8bb
PB
8183 }
8184 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8185 /* Coprocessor double register transfer. */
be5e7a76 8186 ARCH(5TE);
9ee6e8bb
PB
8187 } else if ((insn & 0x0f000010) == 0x0e000010) {
8188 /* Additional coprocessor register transfer. */
7997d92f 8189 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8190 uint32_t mask;
8191 uint32_t val;
8192 /* cps (privileged) */
8193 if (IS_USER(s))
8194 return;
8195 mask = val = 0;
8196 if (insn & (1 << 19)) {
8197 if (insn & (1 << 8))
8198 mask |= CPSR_A;
8199 if (insn & (1 << 7))
8200 mask |= CPSR_I;
8201 if (insn & (1 << 6))
8202 mask |= CPSR_F;
8203 if (insn & (1 << 18))
8204 val |= mask;
8205 }
7997d92f 8206 if (insn & (1 << 17)) {
9ee6e8bb
PB
8207 mask |= CPSR_M;
8208 val |= (insn & 0x1f);
8209 }
8210 if (mask) {
2fbac54b 8211 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8212 }
8213 return;
8214 }
8215 goto illegal_op;
8216 }
8217 if (cond != 0xe) {
8218 /* if not always execute, we generate a conditional jump to
8219 next instruction */
8220 s->condlabel = gen_new_label();
39fb730a 8221 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8222 s->condjmp = 1;
8223 }
8224 if ((insn & 0x0f900000) == 0x03000000) {
8225 if ((insn & (1 << 21)) == 0) {
8226 ARCH(6T2);
8227 rd = (insn >> 12) & 0xf;
8228 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8229 if ((insn & (1 << 22)) == 0) {
8230 /* MOVW */
7d1b0095 8231 tmp = tcg_temp_new_i32();
5e3f878a 8232 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8233 } else {
8234 /* MOVT */
5e3f878a 8235 tmp = load_reg(s, rd);
86831435 8236 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8237 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8238 }
5e3f878a 8239 store_reg(s, rd, tmp);
9ee6e8bb
PB
8240 } else {
8241 if (((insn >> 12) & 0xf) != 0xf)
8242 goto illegal_op;
8243 if (((insn >> 16) & 0xf) == 0) {
8244 gen_nop_hint(s, insn & 0xff);
8245 } else {
8246 /* CPSR = immediate */
8247 val = insn & 0xff;
8248 shift = ((insn >> 8) & 0xf) * 2;
8249 if (shift)
8250 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8251 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8252 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8253 i, val)) {
9ee6e8bb 8254 goto illegal_op;
7dcc1f89 8255 }
9ee6e8bb
PB
8256 }
8257 }
8258 } else if ((insn & 0x0f900000) == 0x01000000
8259 && (insn & 0x00000090) != 0x00000090) {
8260 /* miscellaneous instructions */
8261 op1 = (insn >> 21) & 3;
8262 sh = (insn >> 4) & 0xf;
8263 rm = insn & 0xf;
8264 switch (sh) {
8bfd0550
PM
8265 case 0x0: /* MSR, MRS */
8266 if (insn & (1 << 9)) {
8267 /* MSR (banked) and MRS (banked) */
8268 int sysm = extract32(insn, 16, 4) |
8269 (extract32(insn, 8, 1) << 4);
8270 int r = extract32(insn, 22, 1);
8271
8272 if (op1 & 1) {
8273 /* MSR (banked) */
8274 gen_msr_banked(s, r, sysm, rm);
8275 } else {
8276 /* MRS (banked) */
8277 int rd = extract32(insn, 12, 4);
8278
8279 gen_mrs_banked(s, r, sysm, rd);
8280 }
8281 break;
8282 }
8283
8284 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8285 if (op1 & 1) {
8286 /* PSR = reg */
2fbac54b 8287 tmp = load_reg(s, rm);
9ee6e8bb 8288 i = ((op1 & 2) != 0);
7dcc1f89 8289 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8290 goto illegal_op;
8291 } else {
8292 /* reg = PSR */
8293 rd = (insn >> 12) & 0xf;
8294 if (op1 & 2) {
8295 if (IS_USER(s))
8296 goto illegal_op;
d9ba4830 8297 tmp = load_cpu_field(spsr);
9ee6e8bb 8298 } else {
7d1b0095 8299 tmp = tcg_temp_new_i32();
9ef39277 8300 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8301 }
d9ba4830 8302 store_reg(s, rd, tmp);
9ee6e8bb
PB
8303 }
8304 break;
8305 case 0x1:
8306 if (op1 == 1) {
8307 /* branch/exchange thumb (bx). */
be5e7a76 8308 ARCH(4T);
d9ba4830
PB
8309 tmp = load_reg(s, rm);
8310 gen_bx(s, tmp);
9ee6e8bb
PB
8311 } else if (op1 == 3) {
8312 /* clz */
be5e7a76 8313 ARCH(5);
9ee6e8bb 8314 rd = (insn >> 12) & 0xf;
1497c961
PB
8315 tmp = load_reg(s, rm);
8316 gen_helper_clz(tmp, tmp);
8317 store_reg(s, rd, tmp);
9ee6e8bb
PB
8318 } else {
8319 goto illegal_op;
8320 }
8321 break;
8322 case 0x2:
8323 if (op1 == 1) {
8324 ARCH(5J); /* bxj */
8325 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8326 tmp = load_reg(s, rm);
8327 gen_bx(s, tmp);
9ee6e8bb
PB
8328 } else {
8329 goto illegal_op;
8330 }
8331 break;
8332 case 0x3:
8333 if (op1 != 1)
8334 goto illegal_op;
8335
be5e7a76 8336 ARCH(5);
9ee6e8bb 8337 /* branch link/exchange thumb (blx) */
d9ba4830 8338 tmp = load_reg(s, rm);
7d1b0095 8339 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8340 tcg_gen_movi_i32(tmp2, s->pc);
8341 store_reg(s, 14, tmp2);
8342 gen_bx(s, tmp);
9ee6e8bb 8343 break;
eb0ecd5a
WN
8344 case 0x4:
8345 {
8346 /* crc32/crc32c */
8347 uint32_t c = extract32(insn, 8, 4);
8348
8349 /* Check this CPU supports ARMv8 CRC instructions.
8350 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8351 * Bits 8, 10 and 11 should be zero.
8352 */
d614a513 8353 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8354 (c & 0xd) != 0) {
8355 goto illegal_op;
8356 }
8357
8358 rn = extract32(insn, 16, 4);
8359 rd = extract32(insn, 12, 4);
8360
8361 tmp = load_reg(s, rn);
8362 tmp2 = load_reg(s, rm);
aa633469
PM
8363 if (op1 == 0) {
8364 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8365 } else if (op1 == 1) {
8366 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8367 }
eb0ecd5a
WN
8368 tmp3 = tcg_const_i32(1 << op1);
8369 if (c & 0x2) {
8370 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8371 } else {
8372 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8373 }
8374 tcg_temp_free_i32(tmp2);
8375 tcg_temp_free_i32(tmp3);
8376 store_reg(s, rd, tmp);
8377 break;
8378 }
9ee6e8bb 8379 case 0x5: /* saturating add/subtract */
be5e7a76 8380 ARCH(5TE);
9ee6e8bb
PB
8381 rd = (insn >> 12) & 0xf;
8382 rn = (insn >> 16) & 0xf;
b40d0353 8383 tmp = load_reg(s, rm);
5e3f878a 8384 tmp2 = load_reg(s, rn);
9ee6e8bb 8385 if (op1 & 2)
9ef39277 8386 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8387 if (op1 & 1)
9ef39277 8388 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8389 else
9ef39277 8390 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8391 tcg_temp_free_i32(tmp2);
5e3f878a 8392 store_reg(s, rd, tmp);
9ee6e8bb 8393 break;
49e14940 8394 case 7:
d4a2dc67
PM
8395 {
8396 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
8397 switch (op1) {
8398 case 1:
8399 /* bkpt */
8400 ARCH(5);
8401 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8402 syn_aa32_bkpt(imm16, false),
8403 default_exception_el(s));
37e6456e
PM
8404 break;
8405 case 2:
8406 /* Hypervisor call (v7) */
8407 ARCH(7);
8408 if (IS_USER(s)) {
8409 goto illegal_op;
8410 }
8411 gen_hvc(s, imm16);
8412 break;
8413 case 3:
8414 /* Secure monitor call (v6+) */
8415 ARCH(6K);
8416 if (IS_USER(s)) {
8417 goto illegal_op;
8418 }
8419 gen_smc(s);
8420 break;
8421 default:
49e14940
AL
8422 goto illegal_op;
8423 }
9ee6e8bb 8424 break;
d4a2dc67 8425 }
9ee6e8bb
PB
8426 case 0x8: /* signed multiply */
8427 case 0xa:
8428 case 0xc:
8429 case 0xe:
be5e7a76 8430 ARCH(5TE);
9ee6e8bb
PB
8431 rs = (insn >> 8) & 0xf;
8432 rn = (insn >> 12) & 0xf;
8433 rd = (insn >> 16) & 0xf;
8434 if (op1 == 1) {
8435 /* (32 * 16) >> 16 */
5e3f878a
PB
8436 tmp = load_reg(s, rm);
8437 tmp2 = load_reg(s, rs);
9ee6e8bb 8438 if (sh & 4)
5e3f878a 8439 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8440 else
5e3f878a 8441 gen_sxth(tmp2);
a7812ae4
PB
8442 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8443 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8444 tmp = tcg_temp_new_i32();
ecc7b3aa 8445 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8446 tcg_temp_free_i64(tmp64);
9ee6e8bb 8447 if ((sh & 2) == 0) {
5e3f878a 8448 tmp2 = load_reg(s, rn);
9ef39277 8449 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8450 tcg_temp_free_i32(tmp2);
9ee6e8bb 8451 }
5e3f878a 8452 store_reg(s, rd, tmp);
9ee6e8bb
PB
8453 } else {
8454 /* 16 * 16 */
5e3f878a
PB
8455 tmp = load_reg(s, rm);
8456 tmp2 = load_reg(s, rs);
8457 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8458 tcg_temp_free_i32(tmp2);
9ee6e8bb 8459 if (op1 == 2) {
a7812ae4
PB
8460 tmp64 = tcg_temp_new_i64();
8461 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8462 tcg_temp_free_i32(tmp);
a7812ae4
PB
8463 gen_addq(s, tmp64, rn, rd);
8464 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8465 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8466 } else {
8467 if (op1 == 0) {
5e3f878a 8468 tmp2 = load_reg(s, rn);
9ef39277 8469 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8470 tcg_temp_free_i32(tmp2);
9ee6e8bb 8471 }
5e3f878a 8472 store_reg(s, rd, tmp);
9ee6e8bb
PB
8473 }
8474 }
8475 break;
8476 default:
8477 goto illegal_op;
8478 }
8479 } else if (((insn & 0x0e000000) == 0 &&
8480 (insn & 0x00000090) != 0x90) ||
8481 ((insn & 0x0e000000) == (1 << 25))) {
8482 int set_cc, logic_cc, shiftop;
8483
8484 op1 = (insn >> 21) & 0xf;
8485 set_cc = (insn >> 20) & 1;
8486 logic_cc = table_logic_cc[op1] & set_cc;
8487
8488 /* data processing instruction */
8489 if (insn & (1 << 25)) {
8490 /* immediate operand */
8491 val = insn & 0xff;
8492 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8493 if (shift) {
9ee6e8bb 8494 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8495 }
7d1b0095 8496 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8497 tcg_gen_movi_i32(tmp2, val);
8498 if (logic_cc && shift) {
8499 gen_set_CF_bit31(tmp2);
8500 }
9ee6e8bb
PB
8501 } else {
8502 /* register */
8503 rm = (insn) & 0xf;
e9bb4aa9 8504 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8505 shiftop = (insn >> 5) & 3;
8506 if (!(insn & (1 << 4))) {
8507 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8508 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8509 } else {
8510 rs = (insn >> 8) & 0xf;
8984bd2e 8511 tmp = load_reg(s, rs);
e9bb4aa9 8512 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8513 }
8514 }
8515 if (op1 != 0x0f && op1 != 0x0d) {
8516 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8517 tmp = load_reg(s, rn);
8518 } else {
39d5492a 8519 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8520 }
8521 rd = (insn >> 12) & 0xf;
8522 switch(op1) {
8523 case 0x00:
e9bb4aa9
JR
8524 tcg_gen_and_i32(tmp, tmp, tmp2);
8525 if (logic_cc) {
8526 gen_logic_CC(tmp);
8527 }
7dcc1f89 8528 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8529 break;
8530 case 0x01:
e9bb4aa9
JR
8531 tcg_gen_xor_i32(tmp, tmp, tmp2);
8532 if (logic_cc) {
8533 gen_logic_CC(tmp);
8534 }
7dcc1f89 8535 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8536 break;
8537 case 0x02:
8538 if (set_cc && rd == 15) {
8539 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8540 if (IS_USER(s)) {
9ee6e8bb 8541 goto illegal_op;
e9bb4aa9 8542 }
72485ec4 8543 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8544 gen_exception_return(s, tmp);
9ee6e8bb 8545 } else {
e9bb4aa9 8546 if (set_cc) {
72485ec4 8547 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8548 } else {
8549 tcg_gen_sub_i32(tmp, tmp, tmp2);
8550 }
7dcc1f89 8551 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8552 }
8553 break;
8554 case 0x03:
e9bb4aa9 8555 if (set_cc) {
72485ec4 8556 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8557 } else {
8558 tcg_gen_sub_i32(tmp, tmp2, tmp);
8559 }
7dcc1f89 8560 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8561 break;
8562 case 0x04:
e9bb4aa9 8563 if (set_cc) {
72485ec4 8564 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8565 } else {
8566 tcg_gen_add_i32(tmp, tmp, tmp2);
8567 }
7dcc1f89 8568 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8569 break;
8570 case 0x05:
e9bb4aa9 8571 if (set_cc) {
49b4c31e 8572 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8573 } else {
8574 gen_add_carry(tmp, tmp, tmp2);
8575 }
7dcc1f89 8576 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8577 break;
8578 case 0x06:
e9bb4aa9 8579 if (set_cc) {
2de68a49 8580 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8581 } else {
8582 gen_sub_carry(tmp, tmp, tmp2);
8583 }
7dcc1f89 8584 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8585 break;
8586 case 0x07:
e9bb4aa9 8587 if (set_cc) {
2de68a49 8588 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8589 } else {
8590 gen_sub_carry(tmp, tmp2, tmp);
8591 }
7dcc1f89 8592 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8593 break;
8594 case 0x08:
8595 if (set_cc) {
e9bb4aa9
JR
8596 tcg_gen_and_i32(tmp, tmp, tmp2);
8597 gen_logic_CC(tmp);
9ee6e8bb 8598 }
7d1b0095 8599 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8600 break;
8601 case 0x09:
8602 if (set_cc) {
e9bb4aa9
JR
8603 tcg_gen_xor_i32(tmp, tmp, tmp2);
8604 gen_logic_CC(tmp);
9ee6e8bb 8605 }
7d1b0095 8606 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8607 break;
8608 case 0x0a:
8609 if (set_cc) {
72485ec4 8610 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8611 }
7d1b0095 8612 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8613 break;
8614 case 0x0b:
8615 if (set_cc) {
72485ec4 8616 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8617 }
7d1b0095 8618 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8619 break;
8620 case 0x0c:
e9bb4aa9
JR
8621 tcg_gen_or_i32(tmp, tmp, tmp2);
8622 if (logic_cc) {
8623 gen_logic_CC(tmp);
8624 }
7dcc1f89 8625 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8626 break;
8627 case 0x0d:
8628 if (logic_cc && rd == 15) {
8629 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8630 if (IS_USER(s)) {
9ee6e8bb 8631 goto illegal_op;
e9bb4aa9
JR
8632 }
8633 gen_exception_return(s, tmp2);
9ee6e8bb 8634 } else {
e9bb4aa9
JR
8635 if (logic_cc) {
8636 gen_logic_CC(tmp2);
8637 }
7dcc1f89 8638 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8639 }
8640 break;
8641 case 0x0e:
f669df27 8642 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8643 if (logic_cc) {
8644 gen_logic_CC(tmp);
8645 }
7dcc1f89 8646 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8647 break;
8648 default:
8649 case 0x0f:
e9bb4aa9
JR
8650 tcg_gen_not_i32(tmp2, tmp2);
8651 if (logic_cc) {
8652 gen_logic_CC(tmp2);
8653 }
7dcc1f89 8654 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8655 break;
8656 }
e9bb4aa9 8657 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8658 tcg_temp_free_i32(tmp2);
e9bb4aa9 8659 }
9ee6e8bb
PB
8660 } else {
8661 /* other instructions */
8662 op1 = (insn >> 24) & 0xf;
8663 switch(op1) {
8664 case 0x0:
8665 case 0x1:
8666 /* multiplies, extra load/stores */
8667 sh = (insn >> 5) & 3;
8668 if (sh == 0) {
8669 if (op1 == 0x0) {
8670 rd = (insn >> 16) & 0xf;
8671 rn = (insn >> 12) & 0xf;
8672 rs = (insn >> 8) & 0xf;
8673 rm = (insn) & 0xf;
8674 op1 = (insn >> 20) & 0xf;
8675 switch (op1) {
8676 case 0: case 1: case 2: case 3: case 6:
8677 /* 32 bit mul */
5e3f878a
PB
8678 tmp = load_reg(s, rs);
8679 tmp2 = load_reg(s, rm);
8680 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8681 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8682 if (insn & (1 << 22)) {
8683 /* Subtract (mls) */
8684 ARCH(6T2);
5e3f878a
PB
8685 tmp2 = load_reg(s, rn);
8686 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8687 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8688 } else if (insn & (1 << 21)) {
8689 /* Add */
5e3f878a
PB
8690 tmp2 = load_reg(s, rn);
8691 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8692 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8693 }
8694 if (insn & (1 << 20))
5e3f878a
PB
8695 gen_logic_CC(tmp);
8696 store_reg(s, rd, tmp);
9ee6e8bb 8697 break;
8aac08b1
AJ
8698 case 4:
8699 /* 64 bit mul double accumulate (UMAAL) */
8700 ARCH(6);
8701 tmp = load_reg(s, rs);
8702 tmp2 = load_reg(s, rm);
8703 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8704 gen_addq_lo(s, tmp64, rn);
8705 gen_addq_lo(s, tmp64, rd);
8706 gen_storeq_reg(s, rn, rd, tmp64);
8707 tcg_temp_free_i64(tmp64);
8708 break;
8709 case 8: case 9: case 10: case 11:
8710 case 12: case 13: case 14: case 15:
8711 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8712 tmp = load_reg(s, rs);
8713 tmp2 = load_reg(s, rm);
8aac08b1 8714 if (insn & (1 << 22)) {
c9f10124 8715 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8716 } else {
c9f10124 8717 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8718 }
8719 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8720 TCGv_i32 al = load_reg(s, rn);
8721 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8722 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8723 tcg_temp_free_i32(al);
8724 tcg_temp_free_i32(ah);
9ee6e8bb 8725 }
8aac08b1 8726 if (insn & (1 << 20)) {
c9f10124 8727 gen_logicq_cc(tmp, tmp2);
8aac08b1 8728 }
c9f10124
RH
8729 store_reg(s, rn, tmp);
8730 store_reg(s, rd, tmp2);
9ee6e8bb 8731 break;
8aac08b1
AJ
8732 default:
8733 goto illegal_op;
9ee6e8bb
PB
8734 }
8735 } else {
8736 rn = (insn >> 16) & 0xf;
8737 rd = (insn >> 12) & 0xf;
8738 if (insn & (1 << 23)) {
8739 /* load/store exclusive */
2359bf80 8740 int op2 = (insn >> 8) & 3;
86753403 8741 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8742
8743 switch (op2) {
8744 case 0: /* lda/stl */
8745 if (op1 == 1) {
8746 goto illegal_op;
8747 }
8748 ARCH(8);
8749 break;
8750 case 1: /* reserved */
8751 goto illegal_op;
8752 case 2: /* ldaex/stlex */
8753 ARCH(8);
8754 break;
8755 case 3: /* ldrex/strex */
8756 if (op1) {
8757 ARCH(6K);
8758 } else {
8759 ARCH(6);
8760 }
8761 break;
8762 }
8763
3174f8e9 8764 addr = tcg_temp_local_new_i32();
98a46317 8765 load_reg_var(s, addr, rn);
2359bf80
MR
8766
8767 /* Since the emulation does not have barriers,
8768 the acquire/release semantics need no special
8769 handling */
8770 if (op2 == 0) {
8771 if (insn & (1 << 20)) {
8772 tmp = tcg_temp_new_i32();
8773 switch (op1) {
8774 case 0: /* lda */
12dcc321
PB
8775 gen_aa32_ld32u(s, tmp, addr,
8776 get_mem_index(s));
2359bf80
MR
8777 break;
8778 case 2: /* ldab */
12dcc321
PB
8779 gen_aa32_ld8u(s, tmp, addr,
8780 get_mem_index(s));
2359bf80
MR
8781 break;
8782 case 3: /* ldah */
12dcc321
PB
8783 gen_aa32_ld16u(s, tmp, addr,
8784 get_mem_index(s));
2359bf80
MR
8785 break;
8786 default:
8787 abort();
8788 }
8789 store_reg(s, rd, tmp);
8790 } else {
8791 rm = insn & 0xf;
8792 tmp = load_reg(s, rm);
8793 switch (op1) {
8794 case 0: /* stl */
12dcc321
PB
8795 gen_aa32_st32(s, tmp, addr,
8796 get_mem_index(s));
2359bf80
MR
8797 break;
8798 case 2: /* stlb */
12dcc321
PB
8799 gen_aa32_st8(s, tmp, addr,
8800 get_mem_index(s));
2359bf80
MR
8801 break;
8802 case 3: /* stlh */
12dcc321
PB
8803 gen_aa32_st16(s, tmp, addr,
8804 get_mem_index(s));
2359bf80
MR
8805 break;
8806 default:
8807 abort();
8808 }
8809 tcg_temp_free_i32(tmp);
8810 }
8811 } else if (insn & (1 << 20)) {
86753403
PB
8812 switch (op1) {
8813 case 0: /* ldrex */
426f5abc 8814 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8815 break;
8816 case 1: /* ldrexd */
426f5abc 8817 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8818 break;
8819 case 2: /* ldrexb */
426f5abc 8820 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8821 break;
8822 case 3: /* ldrexh */
426f5abc 8823 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8824 break;
8825 default:
8826 abort();
8827 }
9ee6e8bb
PB
8828 } else {
8829 rm = insn & 0xf;
86753403
PB
8830 switch (op1) {
8831 case 0: /* strex */
426f5abc 8832 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8833 break;
8834 case 1: /* strexd */
502e64fe 8835 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8836 break;
8837 case 2: /* strexb */
426f5abc 8838 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8839 break;
8840 case 3: /* strexh */
426f5abc 8841 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8842 break;
8843 default:
8844 abort();
8845 }
9ee6e8bb 8846 }
39d5492a 8847 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8848 } else {
8849 /* SWP instruction */
8850 rm = (insn) & 0xf;
8851
8984bd2e
PB
8852 /* ??? This is not really atomic. However we know
8853 we never have multiple CPUs running in parallel,
8854 so it is good enough. */
8855 addr = load_reg(s, rn);
8856 tmp = load_reg(s, rm);
5a839c0d 8857 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8858 if (insn & (1 << 22)) {
12dcc321
PB
8859 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8860 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8861 } else {
12dcc321
PB
8862 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8863 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8864 }
5a839c0d 8865 tcg_temp_free_i32(tmp);
7d1b0095 8866 tcg_temp_free_i32(addr);
8984bd2e 8867 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8868 }
8869 }
8870 } else {
8871 int address_offset;
3960c336
PM
8872 bool load = insn & (1 << 20);
8873 bool doubleword = false;
9ee6e8bb
PB
8874 /* Misc load/store */
8875 rn = (insn >> 16) & 0xf;
8876 rd = (insn >> 12) & 0xf;
3960c336
PM
8877
8878 if (!load && (sh & 2)) {
8879 /* doubleword */
8880 ARCH(5TE);
8881 if (rd & 1) {
8882 /* UNPREDICTABLE; we choose to UNDEF */
8883 goto illegal_op;
8884 }
8885 load = (sh & 1) == 0;
8886 doubleword = true;
8887 }
8888
b0109805 8889 addr = load_reg(s, rn);
9ee6e8bb 8890 if (insn & (1 << 24))
b0109805 8891 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8892 address_offset = 0;
3960c336
PM
8893
8894 if (doubleword) {
8895 if (!load) {
9ee6e8bb 8896 /* store */
b0109805 8897 tmp = load_reg(s, rd);
12dcc321 8898 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8899 tcg_temp_free_i32(tmp);
b0109805
PB
8900 tcg_gen_addi_i32(addr, addr, 4);
8901 tmp = load_reg(s, rd + 1);
12dcc321 8902 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8903 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8904 } else {
8905 /* load */
5a839c0d 8906 tmp = tcg_temp_new_i32();
12dcc321 8907 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8908 store_reg(s, rd, tmp);
8909 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8910 tmp = tcg_temp_new_i32();
12dcc321 8911 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8912 rd++;
9ee6e8bb
PB
8913 }
8914 address_offset = -4;
3960c336
PM
8915 } else if (load) {
8916 /* load */
8917 tmp = tcg_temp_new_i32();
8918 switch (sh) {
8919 case 1:
12dcc321 8920 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
3960c336
PM
8921 break;
8922 case 2:
12dcc321 8923 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8924 break;
8925 default:
8926 case 3:
12dcc321 8927 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8928 break;
8929 }
9ee6e8bb
PB
8930 } else {
8931 /* store */
b0109805 8932 tmp = load_reg(s, rd);
12dcc321 8933 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5a839c0d 8934 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8935 }
8936 /* Perform base writeback before the loaded value to
8937 ensure correct behavior with overlapping index registers.
b6af0975 8938 ldrd with base writeback is undefined if the
9ee6e8bb
PB
8939 destination and index registers overlap. */
8940 if (!(insn & (1 << 24))) {
b0109805
PB
8941 gen_add_datah_offset(s, insn, address_offset, addr);
8942 store_reg(s, rn, addr);
9ee6e8bb
PB
8943 } else if (insn & (1 << 21)) {
8944 if (address_offset)
b0109805
PB
8945 tcg_gen_addi_i32(addr, addr, address_offset);
8946 store_reg(s, rn, addr);
8947 } else {
7d1b0095 8948 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8949 }
8950 if (load) {
8951 /* Complete the load. */
b0109805 8952 store_reg(s, rd, tmp);
9ee6e8bb
PB
8953 }
8954 }
8955 break;
8956 case 0x4:
8957 case 0x5:
8958 goto do_ldst;
8959 case 0x6:
8960 case 0x7:
8961 if (insn & (1 << 4)) {
8962 ARCH(6);
8963 /* Armv6 Media instructions. */
8964 rm = insn & 0xf;
8965 rn = (insn >> 16) & 0xf;
2c0262af 8966 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8967 rs = (insn >> 8) & 0xf;
8968 switch ((insn >> 23) & 3) {
8969 case 0: /* Parallel add/subtract. */
8970 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8971 tmp = load_reg(s, rn);
8972 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8973 sh = (insn >> 5) & 7;
8974 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8975 goto illegal_op;
6ddbc6e4 8976 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8977 tcg_temp_free_i32(tmp2);
6ddbc6e4 8978 store_reg(s, rd, tmp);
9ee6e8bb
PB
8979 break;
8980 case 1:
8981 if ((insn & 0x00700020) == 0) {
6c95676b 8982 /* Halfword pack. */
3670669c
PB
8983 tmp = load_reg(s, rn);
8984 tmp2 = load_reg(s, rm);
9ee6e8bb 8985 shift = (insn >> 7) & 0x1f;
3670669c
PB
8986 if (insn & (1 << 6)) {
8987 /* pkhtb */
22478e79
AZ
8988 if (shift == 0)
8989 shift = 31;
8990 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8991 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8992 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8993 } else {
8994 /* pkhbt */
22478e79
AZ
8995 if (shift)
8996 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8997 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8998 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8999 }
9000 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9001 tcg_temp_free_i32(tmp2);
3670669c 9002 store_reg(s, rd, tmp);
9ee6e8bb
PB
9003 } else if ((insn & 0x00200020) == 0x00200000) {
9004 /* [us]sat */
6ddbc6e4 9005 tmp = load_reg(s, rm);
9ee6e8bb
PB
9006 shift = (insn >> 7) & 0x1f;
9007 if (insn & (1 << 6)) {
9008 if (shift == 0)
9009 shift = 31;
6ddbc6e4 9010 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9011 } else {
6ddbc6e4 9012 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9013 }
9014 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9015 tmp2 = tcg_const_i32(sh);
9016 if (insn & (1 << 22))
9ef39277 9017 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9018 else
9ef39277 9019 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9020 tcg_temp_free_i32(tmp2);
6ddbc6e4 9021 store_reg(s, rd, tmp);
9ee6e8bb
PB
9022 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9023 /* [us]sat16 */
6ddbc6e4 9024 tmp = load_reg(s, rm);
9ee6e8bb 9025 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9026 tmp2 = tcg_const_i32(sh);
9027 if (insn & (1 << 22))
9ef39277 9028 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9029 else
9ef39277 9030 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9031 tcg_temp_free_i32(tmp2);
6ddbc6e4 9032 store_reg(s, rd, tmp);
9ee6e8bb
PB
9033 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9034 /* Select bytes. */
6ddbc6e4
PB
9035 tmp = load_reg(s, rn);
9036 tmp2 = load_reg(s, rm);
7d1b0095 9037 tmp3 = tcg_temp_new_i32();
0ecb72a5 9038 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9039 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9040 tcg_temp_free_i32(tmp3);
9041 tcg_temp_free_i32(tmp2);
6ddbc6e4 9042 store_reg(s, rd, tmp);
9ee6e8bb 9043 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9044 tmp = load_reg(s, rm);
9ee6e8bb 9045 shift = (insn >> 10) & 3;
1301f322 9046 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9047 rotate, a shift is sufficient. */
9048 if (shift != 0)
f669df27 9049 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9050 op1 = (insn >> 20) & 7;
9051 switch (op1) {
5e3f878a
PB
9052 case 0: gen_sxtb16(tmp); break;
9053 case 2: gen_sxtb(tmp); break;
9054 case 3: gen_sxth(tmp); break;
9055 case 4: gen_uxtb16(tmp); break;
9056 case 6: gen_uxtb(tmp); break;
9057 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9058 default: goto illegal_op;
9059 }
9060 if (rn != 15) {
5e3f878a 9061 tmp2 = load_reg(s, rn);
9ee6e8bb 9062 if ((op1 & 3) == 0) {
5e3f878a 9063 gen_add16(tmp, tmp2);
9ee6e8bb 9064 } else {
5e3f878a 9065 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9066 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9067 }
9068 }
6c95676b 9069 store_reg(s, rd, tmp);
9ee6e8bb
PB
9070 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9071 /* rev */
b0109805 9072 tmp = load_reg(s, rm);
9ee6e8bb
PB
9073 if (insn & (1 << 22)) {
9074 if (insn & (1 << 7)) {
b0109805 9075 gen_revsh(tmp);
9ee6e8bb
PB
9076 } else {
9077 ARCH(6T2);
b0109805 9078 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9079 }
9080 } else {
9081 if (insn & (1 << 7))
b0109805 9082 gen_rev16(tmp);
9ee6e8bb 9083 else
66896cb8 9084 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9085 }
b0109805 9086 store_reg(s, rd, tmp);
9ee6e8bb
PB
9087 } else {
9088 goto illegal_op;
9089 }
9090 break;
9091 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9092 switch ((insn >> 20) & 0x7) {
9093 case 5:
9094 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9095 /* op2 not 00x or 11x : UNDEF */
9096 goto illegal_op;
9097 }
838fa72d
AJ
9098 /* Signed multiply most significant [accumulate].
9099 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9100 tmp = load_reg(s, rm);
9101 tmp2 = load_reg(s, rs);
a7812ae4 9102 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9103
955a7dd5 9104 if (rd != 15) {
838fa72d 9105 tmp = load_reg(s, rd);
9ee6e8bb 9106 if (insn & (1 << 6)) {
838fa72d 9107 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9108 } else {
838fa72d 9109 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9110 }
9111 }
838fa72d
AJ
9112 if (insn & (1 << 5)) {
9113 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9114 }
9115 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9116 tmp = tcg_temp_new_i32();
ecc7b3aa 9117 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9118 tcg_temp_free_i64(tmp64);
955a7dd5 9119 store_reg(s, rn, tmp);
41e9564d
PM
9120 break;
9121 case 0:
9122 case 4:
9123 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9124 if (insn & (1 << 7)) {
9125 goto illegal_op;
9126 }
9127 tmp = load_reg(s, rm);
9128 tmp2 = load_reg(s, rs);
9ee6e8bb 9129 if (insn & (1 << 5))
5e3f878a
PB
9130 gen_swap_half(tmp2);
9131 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9132 if (insn & (1 << 22)) {
5e3f878a 9133 /* smlald, smlsld */
33bbd75a
PC
9134 TCGv_i64 tmp64_2;
9135
a7812ae4 9136 tmp64 = tcg_temp_new_i64();
33bbd75a 9137 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9138 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9139 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9140 tcg_temp_free_i32(tmp);
33bbd75a
PC
9141 tcg_temp_free_i32(tmp2);
9142 if (insn & (1 << 6)) {
9143 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9144 } else {
9145 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9146 }
9147 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9148 gen_addq(s, tmp64, rd, rn);
9149 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9150 tcg_temp_free_i64(tmp64);
9ee6e8bb 9151 } else {
5e3f878a 9152 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9153 if (insn & (1 << 6)) {
9154 /* This subtraction cannot overflow. */
9155 tcg_gen_sub_i32(tmp, tmp, tmp2);
9156 } else {
9157 /* This addition cannot overflow 32 bits;
9158 * however it may overflow considered as a
9159 * signed operation, in which case we must set
9160 * the Q flag.
9161 */
9162 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9163 }
9164 tcg_temp_free_i32(tmp2);
22478e79 9165 if (rd != 15)
9ee6e8bb 9166 {
22478e79 9167 tmp2 = load_reg(s, rd);
9ef39277 9168 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9169 tcg_temp_free_i32(tmp2);
9ee6e8bb 9170 }
22478e79 9171 store_reg(s, rn, tmp);
9ee6e8bb 9172 }
41e9564d 9173 break;
b8b8ea05
PM
9174 case 1:
9175 case 3:
9176 /* SDIV, UDIV */
d614a513 9177 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9178 goto illegal_op;
9179 }
9180 if (((insn >> 5) & 7) || (rd != 15)) {
9181 goto illegal_op;
9182 }
9183 tmp = load_reg(s, rm);
9184 tmp2 = load_reg(s, rs);
9185 if (insn & (1 << 21)) {
9186 gen_helper_udiv(tmp, tmp, tmp2);
9187 } else {
9188 gen_helper_sdiv(tmp, tmp, tmp2);
9189 }
9190 tcg_temp_free_i32(tmp2);
9191 store_reg(s, rn, tmp);
9192 break;
41e9564d
PM
9193 default:
9194 goto illegal_op;
9ee6e8bb
PB
9195 }
9196 break;
9197 case 3:
9198 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9199 switch (op1) {
9200 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9201 ARCH(6);
9202 tmp = load_reg(s, rm);
9203 tmp2 = load_reg(s, rs);
9204 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9205 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9206 if (rd != 15) {
9207 tmp2 = load_reg(s, rd);
6ddbc6e4 9208 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9209 tcg_temp_free_i32(tmp2);
9ee6e8bb 9210 }
ded9d295 9211 store_reg(s, rn, tmp);
9ee6e8bb
PB
9212 break;
9213 case 0x20: case 0x24: case 0x28: case 0x2c:
9214 /* Bitfield insert/clear. */
9215 ARCH(6T2);
9216 shift = (insn >> 7) & 0x1f;
9217 i = (insn >> 16) & 0x1f;
45140a57
KB
9218 if (i < shift) {
9219 /* UNPREDICTABLE; we choose to UNDEF */
9220 goto illegal_op;
9221 }
9ee6e8bb
PB
9222 i = i + 1 - shift;
9223 if (rm == 15) {
7d1b0095 9224 tmp = tcg_temp_new_i32();
5e3f878a 9225 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9226 } else {
5e3f878a 9227 tmp = load_reg(s, rm);
9ee6e8bb
PB
9228 }
9229 if (i != 32) {
5e3f878a 9230 tmp2 = load_reg(s, rd);
d593c48e 9231 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9232 tcg_temp_free_i32(tmp2);
9ee6e8bb 9233 }
5e3f878a 9234 store_reg(s, rd, tmp);
9ee6e8bb
PB
9235 break;
9236 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9237 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9238 ARCH(6T2);
5e3f878a 9239 tmp = load_reg(s, rm);
9ee6e8bb
PB
9240 shift = (insn >> 7) & 0x1f;
9241 i = ((insn >> 16) & 0x1f) + 1;
9242 if (shift + i > 32)
9243 goto illegal_op;
9244 if (i < 32) {
9245 if (op1 & 0x20) {
5e3f878a 9246 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 9247 } else {
5e3f878a 9248 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
9249 }
9250 }
5e3f878a 9251 store_reg(s, rd, tmp);
9ee6e8bb
PB
9252 break;
9253 default:
9254 goto illegal_op;
9255 }
9256 break;
9257 }
9258 break;
9259 }
9260 do_ldst:
9261 /* Check for undefined extension instructions
9262 * per the ARM Bible IE:
9263 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9264 */
9265 sh = (0xf << 20) | (0xf << 4);
9266 if (op1 == 0x7 && ((insn & sh) == sh))
9267 {
9268 goto illegal_op;
9269 }
9270 /* load/store byte/word */
9271 rn = (insn >> 16) & 0xf;
9272 rd = (insn >> 12) & 0xf;
b0109805 9273 tmp2 = load_reg(s, rn);
a99caa48
PM
9274 if ((insn & 0x01200000) == 0x00200000) {
9275 /* ldrt/strt */
579d21cc 9276 i = get_a32_user_mem_index(s);
a99caa48
PM
9277 } else {
9278 i = get_mem_index(s);
9279 }
9ee6e8bb 9280 if (insn & (1 << 24))
b0109805 9281 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9282 if (insn & (1 << 20)) {
9283 /* load */
5a839c0d 9284 tmp = tcg_temp_new_i32();
9ee6e8bb 9285 if (insn & (1 << 22)) {
12dcc321 9286 gen_aa32_ld8u(s, tmp, tmp2, i);
9ee6e8bb 9287 } else {
12dcc321 9288 gen_aa32_ld32u(s, tmp, tmp2, i);
9ee6e8bb 9289 }
9ee6e8bb
PB
9290 } else {
9291 /* store */
b0109805 9292 tmp = load_reg(s, rd);
5a839c0d 9293 if (insn & (1 << 22)) {
12dcc321 9294 gen_aa32_st8(s, tmp, tmp2, i);
5a839c0d 9295 } else {
12dcc321 9296 gen_aa32_st32(s, tmp, tmp2, i);
5a839c0d
PM
9297 }
9298 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9299 }
9300 if (!(insn & (1 << 24))) {
b0109805
PB
9301 gen_add_data_offset(s, insn, tmp2);
9302 store_reg(s, rn, tmp2);
9303 } else if (insn & (1 << 21)) {
9304 store_reg(s, rn, tmp2);
9305 } else {
7d1b0095 9306 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9307 }
9308 if (insn & (1 << 20)) {
9309 /* Complete the load. */
7dcc1f89 9310 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9311 }
9312 break;
9313 case 0x08:
9314 case 0x09:
9315 {
da3e53dd
PM
9316 int j, n, loaded_base;
9317 bool exc_return = false;
9318 bool is_load = extract32(insn, 20, 1);
9319 bool user = false;
39d5492a 9320 TCGv_i32 loaded_var;
9ee6e8bb
PB
9321 /* load/store multiple words */
9322 /* XXX: store correct base if write back */
9ee6e8bb 9323 if (insn & (1 << 22)) {
da3e53dd 9324 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9325 if (IS_USER(s))
9326 goto illegal_op; /* only usable in supervisor mode */
9327
da3e53dd
PM
9328 if (is_load && extract32(insn, 15, 1)) {
9329 exc_return = true;
9330 } else {
9331 user = true;
9332 }
9ee6e8bb
PB
9333 }
9334 rn = (insn >> 16) & 0xf;
b0109805 9335 addr = load_reg(s, rn);
9ee6e8bb
PB
9336
9337 /* compute total size */
9338 loaded_base = 0;
39d5492a 9339 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9340 n = 0;
9341 for(i=0;i<16;i++) {
9342 if (insn & (1 << i))
9343 n++;
9344 }
9345 /* XXX: test invalid n == 0 case ? */
9346 if (insn & (1 << 23)) {
9347 if (insn & (1 << 24)) {
9348 /* pre increment */
b0109805 9349 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9350 } else {
9351 /* post increment */
9352 }
9353 } else {
9354 if (insn & (1 << 24)) {
9355 /* pre decrement */
b0109805 9356 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9357 } else {
9358 /* post decrement */
9359 if (n != 1)
b0109805 9360 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9361 }
9362 }
9363 j = 0;
9364 for(i=0;i<16;i++) {
9365 if (insn & (1 << i)) {
da3e53dd 9366 if (is_load) {
9ee6e8bb 9367 /* load */
5a839c0d 9368 tmp = tcg_temp_new_i32();
12dcc321 9369 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9370 if (user) {
b75263d6 9371 tmp2 = tcg_const_i32(i);
1ce94f81 9372 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9373 tcg_temp_free_i32(tmp2);
7d1b0095 9374 tcg_temp_free_i32(tmp);
9ee6e8bb 9375 } else if (i == rn) {
b0109805 9376 loaded_var = tmp;
9ee6e8bb 9377 loaded_base = 1;
fb0e8e79
PM
9378 } else if (rn == 15 && exc_return) {
9379 store_pc_exc_ret(s, tmp);
9ee6e8bb 9380 } else {
7dcc1f89 9381 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9382 }
9383 } else {
9384 /* store */
9385 if (i == 15) {
9386 /* special case: r15 = PC + 8 */
9387 val = (long)s->pc + 4;
7d1b0095 9388 tmp = tcg_temp_new_i32();
b0109805 9389 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9390 } else if (user) {
7d1b0095 9391 tmp = tcg_temp_new_i32();
b75263d6 9392 tmp2 = tcg_const_i32(i);
9ef39277 9393 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9394 tcg_temp_free_i32(tmp2);
9ee6e8bb 9395 } else {
b0109805 9396 tmp = load_reg(s, i);
9ee6e8bb 9397 }
12dcc321 9398 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9399 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9400 }
9401 j++;
9402 /* no need to add after the last transfer */
9403 if (j != n)
b0109805 9404 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9405 }
9406 }
9407 if (insn & (1 << 21)) {
9408 /* write back */
9409 if (insn & (1 << 23)) {
9410 if (insn & (1 << 24)) {
9411 /* pre increment */
9412 } else {
9413 /* post increment */
b0109805 9414 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9415 }
9416 } else {
9417 if (insn & (1 << 24)) {
9418 /* pre decrement */
9419 if (n != 1)
b0109805 9420 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9421 } else {
9422 /* post decrement */
b0109805 9423 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9424 }
9425 }
b0109805
PB
9426 store_reg(s, rn, addr);
9427 } else {
7d1b0095 9428 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9429 }
9430 if (loaded_base) {
b0109805 9431 store_reg(s, rn, loaded_var);
9ee6e8bb 9432 }
da3e53dd 9433 if (exc_return) {
9ee6e8bb 9434 /* Restore CPSR from SPSR. */
d9ba4830 9435 tmp = load_cpu_field(spsr);
235ea1f5 9436 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9437 tcg_temp_free_i32(tmp);
577bf808 9438 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9439 }
9440 }
9441 break;
9442 case 0xa:
9443 case 0xb:
9444 {
9445 int32_t offset;
9446
9447 /* branch (and link) */
9448 val = (int32_t)s->pc;
9449 if (insn & (1 << 24)) {
7d1b0095 9450 tmp = tcg_temp_new_i32();
5e3f878a
PB
9451 tcg_gen_movi_i32(tmp, val);
9452 store_reg(s, 14, tmp);
9ee6e8bb 9453 }
534df156
PM
9454 offset = sextract32(insn << 2, 0, 26);
9455 val += offset + 4;
9ee6e8bb
PB
9456 gen_jmp(s, val);
9457 }
9458 break;
9459 case 0xc:
9460 case 0xd:
9461 case 0xe:
6a57f3eb
WN
9462 if (((insn >> 8) & 0xe) == 10) {
9463 /* VFP. */
7dcc1f89 9464 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9465 goto illegal_op;
9466 }
7dcc1f89 9467 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9468 /* Coprocessor. */
9ee6e8bb 9469 goto illegal_op;
6a57f3eb 9470 }
9ee6e8bb
PB
9471 break;
9472 case 0xf:
9473 /* swi */
eaed129d 9474 gen_set_pc_im(s, s->pc);
d4a2dc67 9475 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9476 s->is_jmp = DISAS_SWI;
9477 break;
9478 default:
9479 illegal_op:
73710361
GB
9480 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9481 default_exception_el(s));
9ee6e8bb
PB
9482 break;
9483 }
9484 }
9485}
9486
/* Predicate: does Thumb-2 data-processing opcode OP belong to the
 * logical group (AND, BIC, ORR, ORN, EOR, ...)?  Logical ops set the
 * flags via gen_logic_CC rather than a dedicated arithmetic CC helper.
 */
static int
thumb2_logic_op(int op)
{
    /* Opcodes below 8 form the logical group. */
    return op <= 7;
}
9493
/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Returns zero if the opcode is valid.

   The result is written back into t0; t1 is the (already shifted) second
   operand.  Callers own both temporaries and free them afterwards.  */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
                   TCGv_i32 t0, TCGv_i32 t1)
{
    int logic_cc;

    /* Set nonzero (to CONDS) by the logical cases below; flags for the
     * arithmetic cases are handled inline via the gen_*_CC helpers.  */
    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        tcg_gen_and_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 1: /* bic */
        tcg_gen_andc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 2: /* orr */
        tcg_gen_or_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 3: /* orn */
        tcg_gen_orc_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 4: /* eor */
        tcg_gen_xor_i32(t0, t0, t1);
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_add_CC(t0, t0, t1);
        else
            tcg_gen_add_i32(t0, t0, t1);
        break;
    case 10: /* adc */
        if (conds)
            gen_adc_CC(t0, t0, t1);
        else
            gen_adc(t0, t1);
        break;
    case 11: /* sbc */
        if (conds) {
            gen_sbc_CC(t0, t0, t1);
        } else {
            gen_sub_carry(t0, t0, t1);
        }
        break;
    case 13: /* sub */
        if (conds)
            gen_sub_CC(t0, t0, t1);
        else
            tcg_gen_sub_i32(t0, t0, t1);
        break;
    case 14: /* rsb */
        /* Reverse subtract: operands swapped relative to SUB.  */
        if (conds)
            gen_sub_CC(t0, t1, t0);
        else
            tcg_gen_sub_i32(t0, t1, t0);
        break;
    default: /* 5, 6, 7, 9, 12, 15. */
        /* Not a valid data-processing opcode; caller raises UNDEF.  */
        return 1;
    }
    if (logic_cc) {
        /* NZ flags from the result; C comes from the shifter carry-out
         * (high bit of t1) when the immediate/shift form produced one.  */
        gen_logic_CC(t0);
        if (shifter_out)
            gen_set_CF_bit31(t1);
    }
    return 0;
}
9569
9570/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9571 is not legal. */
0ecb72a5 9572static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9573{
b0109805 9574 uint32_t insn, imm, shift, offset;
9ee6e8bb 9575 uint32_t rd, rn, rm, rs;
39d5492a
PM
9576 TCGv_i32 tmp;
9577 TCGv_i32 tmp2;
9578 TCGv_i32 tmp3;
9579 TCGv_i32 addr;
a7812ae4 9580 TCGv_i64 tmp64;
9ee6e8bb
PB
9581 int op;
9582 int shiftop;
9583 int conds;
9584 int logic_cc;
9585
d614a513
PM
9586 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9587 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9588 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9589 16-bit instructions to get correct prefetch abort behavior. */
9590 insn = insn_hw1;
9591 if ((insn & (1 << 12)) == 0) {
be5e7a76 9592 ARCH(5);
9ee6e8bb
PB
9593 /* Second half of blx. */
9594 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9595 tmp = load_reg(s, 14);
9596 tcg_gen_addi_i32(tmp, tmp, offset);
9597 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9598
7d1b0095 9599 tmp2 = tcg_temp_new_i32();
b0109805 9600 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9601 store_reg(s, 14, tmp2);
9602 gen_bx(s, tmp);
9ee6e8bb
PB
9603 return 0;
9604 }
9605 if (insn & (1 << 11)) {
9606 /* Second half of bl. */
9607 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9608 tmp = load_reg(s, 14);
6a0d8a1d 9609 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9610
7d1b0095 9611 tmp2 = tcg_temp_new_i32();
b0109805 9612 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9613 store_reg(s, 14, tmp2);
9614 gen_bx(s, tmp);
9ee6e8bb
PB
9615 return 0;
9616 }
9617 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9618 /* Instruction spans a page boundary. Implement it as two
9619 16-bit instructions in case the second half causes an
9620 prefetch abort. */
9621 offset = ((int32_t)insn << 21) >> 9;
396e467c 9622 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9623 return 0;
9624 }
9625 /* Fall through to 32-bit decode. */
9626 }
9627
f9fd40eb 9628 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9629 s->pc += 2;
9630 insn |= (uint32_t)insn_hw1 << 16;
9631
9632 if ((insn & 0xf800e800) != 0xf000e800) {
9633 ARCH(6T2);
9634 }
9635
9636 rn = (insn >> 16) & 0xf;
9637 rs = (insn >> 12) & 0xf;
9638 rd = (insn >> 8) & 0xf;
9639 rm = insn & 0xf;
9640 switch ((insn >> 25) & 0xf) {
9641 case 0: case 1: case 2: case 3:
9642 /* 16-bit instructions. Should never happen. */
9643 abort();
9644 case 4:
9645 if (insn & (1 << 22)) {
9646 /* Other load/store, table branch. */
9647 if (insn & 0x01200000) {
9648 /* Load/store doubleword. */
9649 if (rn == 15) {
7d1b0095 9650 addr = tcg_temp_new_i32();
b0109805 9651 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9652 } else {
b0109805 9653 addr = load_reg(s, rn);
9ee6e8bb
PB
9654 }
9655 offset = (insn & 0xff) * 4;
9656 if ((insn & (1 << 23)) == 0)
9657 offset = -offset;
9658 if (insn & (1 << 24)) {
b0109805 9659 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9660 offset = 0;
9661 }
9662 if (insn & (1 << 20)) {
9663 /* ldrd */
e2592fad 9664 tmp = tcg_temp_new_i32();
12dcc321 9665 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9666 store_reg(s, rs, tmp);
9667 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9668 tmp = tcg_temp_new_i32();
12dcc321 9669 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9670 store_reg(s, rd, tmp);
9ee6e8bb
PB
9671 } else {
9672 /* strd */
b0109805 9673 tmp = load_reg(s, rs);
12dcc321 9674 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9675 tcg_temp_free_i32(tmp);
b0109805
PB
9676 tcg_gen_addi_i32(addr, addr, 4);
9677 tmp = load_reg(s, rd);
12dcc321 9678 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9679 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9680 }
9681 if (insn & (1 << 21)) {
9682 /* Base writeback. */
9683 if (rn == 15)
9684 goto illegal_op;
b0109805
PB
9685 tcg_gen_addi_i32(addr, addr, offset - 4);
9686 store_reg(s, rn, addr);
9687 } else {
7d1b0095 9688 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9689 }
9690 } else if ((insn & (1 << 23)) == 0) {
9691 /* Load/store exclusive word. */
39d5492a 9692 addr = tcg_temp_local_new_i32();
98a46317 9693 load_reg_var(s, addr, rn);
426f5abc 9694 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9695 if (insn & (1 << 20)) {
426f5abc 9696 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9697 } else {
426f5abc 9698 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9699 }
39d5492a 9700 tcg_temp_free_i32(addr);
2359bf80 9701 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9702 /* Table Branch. */
9703 if (rn == 15) {
7d1b0095 9704 addr = tcg_temp_new_i32();
b0109805 9705 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9706 } else {
b0109805 9707 addr = load_reg(s, rn);
9ee6e8bb 9708 }
b26eefb6 9709 tmp = load_reg(s, rm);
b0109805 9710 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9711 if (insn & (1 << 4)) {
9712 /* tbh */
b0109805 9713 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9714 tcg_temp_free_i32(tmp);
e2592fad 9715 tmp = tcg_temp_new_i32();
12dcc321 9716 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9717 } else { /* tbb */
7d1b0095 9718 tcg_temp_free_i32(tmp);
e2592fad 9719 tmp = tcg_temp_new_i32();
12dcc321 9720 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9721 }
7d1b0095 9722 tcg_temp_free_i32(addr);
b0109805
PB
9723 tcg_gen_shli_i32(tmp, tmp, 1);
9724 tcg_gen_addi_i32(tmp, tmp, s->pc);
9725 store_reg(s, 15, tmp);
9ee6e8bb 9726 } else {
2359bf80 9727 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9728 op = (insn >> 4) & 0x3;
2359bf80
MR
9729 switch (op2) {
9730 case 0:
426f5abc 9731 goto illegal_op;
2359bf80
MR
9732 case 1:
9733 /* Load/store exclusive byte/halfword/doubleword */
9734 if (op == 2) {
9735 goto illegal_op;
9736 }
9737 ARCH(7);
9738 break;
9739 case 2:
9740 /* Load-acquire/store-release */
9741 if (op == 3) {
9742 goto illegal_op;
9743 }
9744 /* Fall through */
9745 case 3:
9746 /* Load-acquire/store-release exclusive */
9747 ARCH(8);
9748 break;
426f5abc 9749 }
39d5492a 9750 addr = tcg_temp_local_new_i32();
98a46317 9751 load_reg_var(s, addr, rn);
2359bf80
MR
9752 if (!(op2 & 1)) {
9753 if (insn & (1 << 20)) {
9754 tmp = tcg_temp_new_i32();
9755 switch (op) {
9756 case 0: /* ldab */
12dcc321 9757 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9758 break;
9759 case 1: /* ldah */
12dcc321 9760 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9761 break;
9762 case 2: /* lda */
12dcc321 9763 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9764 break;
9765 default:
9766 abort();
9767 }
9768 store_reg(s, rs, tmp);
9769 } else {
9770 tmp = load_reg(s, rs);
9771 switch (op) {
9772 case 0: /* stlb */
12dcc321 9773 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9774 break;
9775 case 1: /* stlh */
12dcc321 9776 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9777 break;
9778 case 2: /* stl */
12dcc321 9779 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9780 break;
9781 default:
9782 abort();
9783 }
9784 tcg_temp_free_i32(tmp);
9785 }
9786 } else if (insn & (1 << 20)) {
426f5abc 9787 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9788 } else {
426f5abc 9789 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9790 }
39d5492a 9791 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9792 }
9793 } else {
9794 /* Load/store multiple, RFE, SRS. */
9795 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9796 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9797 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9798 goto illegal_op;
00115976 9799 }
9ee6e8bb
PB
9800 if (insn & (1 << 20)) {
9801 /* rfe */
b0109805
PB
9802 addr = load_reg(s, rn);
9803 if ((insn & (1 << 24)) == 0)
9804 tcg_gen_addi_i32(addr, addr, -8);
9805 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9806 tmp = tcg_temp_new_i32();
12dcc321 9807 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9808 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9809 tmp2 = tcg_temp_new_i32();
12dcc321 9810 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9811 if (insn & (1 << 21)) {
9812 /* Base writeback. */
b0109805
PB
9813 if (insn & (1 << 24)) {
9814 tcg_gen_addi_i32(addr, addr, 4);
9815 } else {
9816 tcg_gen_addi_i32(addr, addr, -4);
9817 }
9818 store_reg(s, rn, addr);
9819 } else {
7d1b0095 9820 tcg_temp_free_i32(addr);
9ee6e8bb 9821 }
b0109805 9822 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9823 } else {
9824 /* srs */
81465888
PM
9825 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9826 insn & (1 << 21));
9ee6e8bb
PB
9827 }
9828 } else {
5856d44e 9829 int i, loaded_base = 0;
39d5492a 9830 TCGv_i32 loaded_var;
9ee6e8bb 9831 /* Load/store multiple. */
b0109805 9832 addr = load_reg(s, rn);
9ee6e8bb
PB
9833 offset = 0;
9834 for (i = 0; i < 16; i++) {
9835 if (insn & (1 << i))
9836 offset += 4;
9837 }
9838 if (insn & (1 << 24)) {
b0109805 9839 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9840 }
9841
39d5492a 9842 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9843 for (i = 0; i < 16; i++) {
9844 if ((insn & (1 << i)) == 0)
9845 continue;
9846 if (insn & (1 << 20)) {
9847 /* Load. */
e2592fad 9848 tmp = tcg_temp_new_i32();
12dcc321 9849 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9850 if (i == 15) {
b0109805 9851 gen_bx(s, tmp);
5856d44e
YO
9852 } else if (i == rn) {
9853 loaded_var = tmp;
9854 loaded_base = 1;
9ee6e8bb 9855 } else {
b0109805 9856 store_reg(s, i, tmp);
9ee6e8bb
PB
9857 }
9858 } else {
9859 /* Store. */
b0109805 9860 tmp = load_reg(s, i);
12dcc321 9861 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9862 tcg_temp_free_i32(tmp);
9ee6e8bb 9863 }
b0109805 9864 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9865 }
5856d44e
YO
9866 if (loaded_base) {
9867 store_reg(s, rn, loaded_var);
9868 }
9ee6e8bb
PB
9869 if (insn & (1 << 21)) {
9870 /* Base register writeback. */
9871 if (insn & (1 << 24)) {
b0109805 9872 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9873 }
9874 /* Fault if writeback register is in register list. */
9875 if (insn & (1 << rn))
9876 goto illegal_op;
b0109805
PB
9877 store_reg(s, rn, addr);
9878 } else {
7d1b0095 9879 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9880 }
9881 }
9882 }
9883 break;
2af9ab77
JB
9884 case 5:
9885
9ee6e8bb 9886 op = (insn >> 21) & 0xf;
2af9ab77 9887 if (op == 6) {
62b44f05
AR
9888 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9889 goto illegal_op;
9890 }
2af9ab77
JB
9891 /* Halfword pack. */
9892 tmp = load_reg(s, rn);
9893 tmp2 = load_reg(s, rm);
9894 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9895 if (insn & (1 << 5)) {
9896 /* pkhtb */
9897 if (shift == 0)
9898 shift = 31;
9899 tcg_gen_sari_i32(tmp2, tmp2, shift);
9900 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9901 tcg_gen_ext16u_i32(tmp2, tmp2);
9902 } else {
9903 /* pkhbt */
9904 if (shift)
9905 tcg_gen_shli_i32(tmp2, tmp2, shift);
9906 tcg_gen_ext16u_i32(tmp, tmp);
9907 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9908 }
9909 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9910 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9911 store_reg(s, rd, tmp);
9912 } else {
2af9ab77
JB
9913 /* Data processing register constant shift. */
9914 if (rn == 15) {
7d1b0095 9915 tmp = tcg_temp_new_i32();
2af9ab77
JB
9916 tcg_gen_movi_i32(tmp, 0);
9917 } else {
9918 tmp = load_reg(s, rn);
9919 }
9920 tmp2 = load_reg(s, rm);
9921
9922 shiftop = (insn >> 4) & 3;
9923 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9924 conds = (insn & (1 << 20)) != 0;
9925 logic_cc = (conds && thumb2_logic_op(op));
9926 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9927 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9928 goto illegal_op;
7d1b0095 9929 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9930 if (rd != 15) {
9931 store_reg(s, rd, tmp);
9932 } else {
7d1b0095 9933 tcg_temp_free_i32(tmp);
2af9ab77 9934 }
3174f8e9 9935 }
9ee6e8bb
PB
9936 break;
9937 case 13: /* Misc data processing. */
9938 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9939 if (op < 4 && (insn & 0xf000) != 0xf000)
9940 goto illegal_op;
9941 switch (op) {
9942 case 0: /* Register controlled shift. */
8984bd2e
PB
9943 tmp = load_reg(s, rn);
9944 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9945 if ((insn & 0x70) != 0)
9946 goto illegal_op;
9947 op = (insn >> 21) & 3;
8984bd2e
PB
9948 logic_cc = (insn & (1 << 20)) != 0;
9949 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9950 if (logic_cc)
9951 gen_logic_CC(tmp);
7dcc1f89 9952 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9953 break;
9954 case 1: /* Sign/zero extend. */
62b44f05
AR
9955 op = (insn >> 20) & 7;
9956 switch (op) {
9957 case 0: /* SXTAH, SXTH */
9958 case 1: /* UXTAH, UXTH */
9959 case 4: /* SXTAB, SXTB */
9960 case 5: /* UXTAB, UXTB */
9961 break;
9962 case 2: /* SXTAB16, SXTB16 */
9963 case 3: /* UXTAB16, UXTB16 */
9964 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9965 goto illegal_op;
9966 }
9967 break;
9968 default:
9969 goto illegal_op;
9970 }
9971 if (rn != 15) {
9972 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9973 goto illegal_op;
9974 }
9975 }
5e3f878a 9976 tmp = load_reg(s, rm);
9ee6e8bb 9977 shift = (insn >> 4) & 3;
1301f322 9978 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9979 rotate, a shift is sufficient. */
9980 if (shift != 0)
f669df27 9981 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9982 op = (insn >> 20) & 7;
9983 switch (op) {
5e3f878a
PB
9984 case 0: gen_sxth(tmp); break;
9985 case 1: gen_uxth(tmp); break;
9986 case 2: gen_sxtb16(tmp); break;
9987 case 3: gen_uxtb16(tmp); break;
9988 case 4: gen_sxtb(tmp); break;
9989 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9990 default:
9991 g_assert_not_reached();
9ee6e8bb
PB
9992 }
9993 if (rn != 15) {
5e3f878a 9994 tmp2 = load_reg(s, rn);
9ee6e8bb 9995 if ((op >> 1) == 1) {
5e3f878a 9996 gen_add16(tmp, tmp2);
9ee6e8bb 9997 } else {
5e3f878a 9998 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9999 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10000 }
10001 }
5e3f878a 10002 store_reg(s, rd, tmp);
9ee6e8bb
PB
10003 break;
10004 case 2: /* SIMD add/subtract. */
62b44f05
AR
10005 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10006 goto illegal_op;
10007 }
9ee6e8bb
PB
10008 op = (insn >> 20) & 7;
10009 shift = (insn >> 4) & 7;
10010 if ((op & 3) == 3 || (shift & 3) == 3)
10011 goto illegal_op;
6ddbc6e4
PB
10012 tmp = load_reg(s, rn);
10013 tmp2 = load_reg(s, rm);
10014 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10015 tcg_temp_free_i32(tmp2);
6ddbc6e4 10016 store_reg(s, rd, tmp);
9ee6e8bb
PB
10017 break;
10018 case 3: /* Other data processing. */
10019 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10020 if (op < 4) {
10021 /* Saturating add/subtract. */
62b44f05
AR
10022 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10023 goto illegal_op;
10024 }
d9ba4830
PB
10025 tmp = load_reg(s, rn);
10026 tmp2 = load_reg(s, rm);
9ee6e8bb 10027 if (op & 1)
9ef39277 10028 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10029 if (op & 2)
9ef39277 10030 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10031 else
9ef39277 10032 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10033 tcg_temp_free_i32(tmp2);
9ee6e8bb 10034 } else {
62b44f05
AR
10035 switch (op) {
10036 case 0x0a: /* rbit */
10037 case 0x08: /* rev */
10038 case 0x09: /* rev16 */
10039 case 0x0b: /* revsh */
10040 case 0x18: /* clz */
10041 break;
10042 case 0x10: /* sel */
10043 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10044 goto illegal_op;
10045 }
10046 break;
10047 case 0x20: /* crc32/crc32c */
10048 case 0x21:
10049 case 0x22:
10050 case 0x28:
10051 case 0x29:
10052 case 0x2a:
10053 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10054 goto illegal_op;
10055 }
10056 break;
10057 default:
10058 goto illegal_op;
10059 }
d9ba4830 10060 tmp = load_reg(s, rn);
9ee6e8bb
PB
10061 switch (op) {
10062 case 0x0a: /* rbit */
d9ba4830 10063 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10064 break;
10065 case 0x08: /* rev */
66896cb8 10066 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10067 break;
10068 case 0x09: /* rev16 */
d9ba4830 10069 gen_rev16(tmp);
9ee6e8bb
PB
10070 break;
10071 case 0x0b: /* revsh */
d9ba4830 10072 gen_revsh(tmp);
9ee6e8bb
PB
10073 break;
10074 case 0x10: /* sel */
d9ba4830 10075 tmp2 = load_reg(s, rm);
7d1b0095 10076 tmp3 = tcg_temp_new_i32();
0ecb72a5 10077 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10078 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10079 tcg_temp_free_i32(tmp3);
10080 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10081 break;
10082 case 0x18: /* clz */
d9ba4830 10083 gen_helper_clz(tmp, tmp);
9ee6e8bb 10084 break;
eb0ecd5a
WN
10085 case 0x20:
10086 case 0x21:
10087 case 0x22:
10088 case 0x28:
10089 case 0x29:
10090 case 0x2a:
10091 {
10092 /* crc32/crc32c */
10093 uint32_t sz = op & 0x3;
10094 uint32_t c = op & 0x8;
10095
eb0ecd5a 10096 tmp2 = load_reg(s, rm);
aa633469
PM
10097 if (sz == 0) {
10098 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10099 } else if (sz == 1) {
10100 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10101 }
eb0ecd5a
WN
10102 tmp3 = tcg_const_i32(1 << sz);
10103 if (c) {
10104 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10105 } else {
10106 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10107 }
10108 tcg_temp_free_i32(tmp2);
10109 tcg_temp_free_i32(tmp3);
10110 break;
10111 }
9ee6e8bb 10112 default:
62b44f05 10113 g_assert_not_reached();
9ee6e8bb
PB
10114 }
10115 }
d9ba4830 10116 store_reg(s, rd, tmp);
9ee6e8bb
PB
10117 break;
10118 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10119 switch ((insn >> 20) & 7) {
10120 case 0: /* 32 x 32 -> 32 */
10121 case 7: /* Unsigned sum of absolute differences. */
10122 break;
10123 case 1: /* 16 x 16 -> 32 */
10124 case 2: /* Dual multiply add. */
10125 case 3: /* 32 * 16 -> 32msb */
10126 case 4: /* Dual multiply subtract. */
10127 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10128 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10129 goto illegal_op;
10130 }
10131 break;
10132 }
9ee6e8bb 10133 op = (insn >> 4) & 0xf;
d9ba4830
PB
10134 tmp = load_reg(s, rn);
10135 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10136 switch ((insn >> 20) & 7) {
10137 case 0: /* 32 x 32 -> 32 */
d9ba4830 10138 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10139 tcg_temp_free_i32(tmp2);
9ee6e8bb 10140 if (rs != 15) {
d9ba4830 10141 tmp2 = load_reg(s, rs);
9ee6e8bb 10142 if (op)
d9ba4830 10143 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10144 else
d9ba4830 10145 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10146 tcg_temp_free_i32(tmp2);
9ee6e8bb 10147 }
9ee6e8bb
PB
10148 break;
10149 case 1: /* 16 x 16 -> 32 */
d9ba4830 10150 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10151 tcg_temp_free_i32(tmp2);
9ee6e8bb 10152 if (rs != 15) {
d9ba4830 10153 tmp2 = load_reg(s, rs);
9ef39277 10154 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10155 tcg_temp_free_i32(tmp2);
9ee6e8bb 10156 }
9ee6e8bb
PB
10157 break;
10158 case 2: /* Dual multiply add. */
10159 case 4: /* Dual multiply subtract. */
10160 if (op)
d9ba4830
PB
10161 gen_swap_half(tmp2);
10162 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10163 if (insn & (1 << 22)) {
e1d177b9 10164 /* This subtraction cannot overflow. */
d9ba4830 10165 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10166 } else {
e1d177b9
PM
10167 /* This addition cannot overflow 32 bits;
10168 * however it may overflow considered as a signed
10169 * operation, in which case we must set the Q flag.
10170 */
9ef39277 10171 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10172 }
7d1b0095 10173 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10174 if (rs != 15)
10175 {
d9ba4830 10176 tmp2 = load_reg(s, rs);
9ef39277 10177 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10178 tcg_temp_free_i32(tmp2);
9ee6e8bb 10179 }
9ee6e8bb
PB
10180 break;
10181 case 3: /* 32 * 16 -> 32msb */
10182 if (op)
d9ba4830 10183 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10184 else
d9ba4830 10185 gen_sxth(tmp2);
a7812ae4
PB
10186 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10187 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10188 tmp = tcg_temp_new_i32();
ecc7b3aa 10189 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10190 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10191 if (rs != 15)
10192 {
d9ba4830 10193 tmp2 = load_reg(s, rs);
9ef39277 10194 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10195 tcg_temp_free_i32(tmp2);
9ee6e8bb 10196 }
9ee6e8bb 10197 break;
838fa72d
AJ
10198 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10199 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10200 if (rs != 15) {
838fa72d
AJ
10201 tmp = load_reg(s, rs);
10202 if (insn & (1 << 20)) {
10203 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10204 } else {
838fa72d 10205 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10206 }
2c0262af 10207 }
838fa72d
AJ
10208 if (insn & (1 << 4)) {
10209 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10210 }
10211 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10212 tmp = tcg_temp_new_i32();
ecc7b3aa 10213 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10214 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10215 break;
10216 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10217 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10218 tcg_temp_free_i32(tmp2);
9ee6e8bb 10219 if (rs != 15) {
d9ba4830
PB
10220 tmp2 = load_reg(s, rs);
10221 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10222 tcg_temp_free_i32(tmp2);
5fd46862 10223 }
9ee6e8bb 10224 break;
2c0262af 10225 }
d9ba4830 10226 store_reg(s, rd, tmp);
2c0262af 10227 break;
9ee6e8bb
PB
10228 case 6: case 7: /* 64-bit multiply, Divide. */
10229 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10230 tmp = load_reg(s, rn);
10231 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10232 if ((op & 0x50) == 0x10) {
10233 /* sdiv, udiv */
d614a513 10234 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10235 goto illegal_op;
47789990 10236 }
9ee6e8bb 10237 if (op & 0x20)
5e3f878a 10238 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10239 else
5e3f878a 10240 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10241 tcg_temp_free_i32(tmp2);
5e3f878a 10242 store_reg(s, rd, tmp);
9ee6e8bb
PB
10243 } else if ((op & 0xe) == 0xc) {
10244 /* Dual multiply accumulate long. */
62b44f05
AR
10245 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10246 tcg_temp_free_i32(tmp);
10247 tcg_temp_free_i32(tmp2);
10248 goto illegal_op;
10249 }
9ee6e8bb 10250 if (op & 1)
5e3f878a
PB
10251 gen_swap_half(tmp2);
10252 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10253 if (op & 0x10) {
5e3f878a 10254 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10255 } else {
5e3f878a 10256 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10257 }
7d1b0095 10258 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10259 /* BUGFIX */
10260 tmp64 = tcg_temp_new_i64();
10261 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10262 tcg_temp_free_i32(tmp);
a7812ae4
PB
10263 gen_addq(s, tmp64, rs, rd);
10264 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10265 tcg_temp_free_i64(tmp64);
2c0262af 10266 } else {
9ee6e8bb
PB
10267 if (op & 0x20) {
10268 /* Unsigned 64-bit multiply */
a7812ae4 10269 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10270 } else {
9ee6e8bb
PB
10271 if (op & 8) {
10272 /* smlalxy */
62b44f05
AR
10273 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10274 tcg_temp_free_i32(tmp2);
10275 tcg_temp_free_i32(tmp);
10276 goto illegal_op;
10277 }
5e3f878a 10278 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10279 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10280 tmp64 = tcg_temp_new_i64();
10281 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10282 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10283 } else {
10284 /* Signed 64-bit multiply */
a7812ae4 10285 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10286 }
b5ff1b31 10287 }
9ee6e8bb
PB
10288 if (op & 4) {
10289 /* umaal */
62b44f05
AR
10290 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10291 tcg_temp_free_i64(tmp64);
10292 goto illegal_op;
10293 }
a7812ae4
PB
10294 gen_addq_lo(s, tmp64, rs);
10295 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10296 } else if (op & 0x40) {
10297 /* 64-bit accumulate. */
a7812ae4 10298 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10299 }
a7812ae4 10300 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10301 tcg_temp_free_i64(tmp64);
5fd46862 10302 }
2c0262af 10303 break;
9ee6e8bb
PB
10304 }
10305 break;
10306 case 6: case 7: case 14: case 15:
10307 /* Coprocessor. */
10308 if (((insn >> 24) & 3) == 3) {
10309 /* Translate into the equivalent ARM encoding. */
f06053e3 10310 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10311 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10312 goto illegal_op;
7dcc1f89 10313 }
6a57f3eb 10314 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10315 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10316 goto illegal_op;
10317 }
9ee6e8bb
PB
10318 } else {
10319 if (insn & (1 << 28))
10320 goto illegal_op;
7dcc1f89 10321 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10322 goto illegal_op;
7dcc1f89 10323 }
9ee6e8bb
PB
10324 }
10325 break;
10326 case 8: case 9: case 10: case 11:
10327 if (insn & (1 << 15)) {
10328 /* Branches, misc control. */
10329 if (insn & 0x5000) {
10330 /* Unconditional branch. */
10331 /* signextend(hw1[10:0]) -> offset[:12]. */
10332 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10333 /* hw1[10:0] -> offset[11:1]. */
10334 offset |= (insn & 0x7ff) << 1;
10335 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10336 offset[24:22] already have the same value because of the
10337 sign extension above. */
10338 offset ^= ((~insn) & (1 << 13)) << 10;
10339 offset ^= ((~insn) & (1 << 11)) << 11;
10340
9ee6e8bb
PB
10341 if (insn & (1 << 14)) {
10342 /* Branch and link. */
3174f8e9 10343 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10344 }
3b46e624 10345
b0109805 10346 offset += s->pc;
9ee6e8bb
PB
10347 if (insn & (1 << 12)) {
10348 /* b/bl */
b0109805 10349 gen_jmp(s, offset);
9ee6e8bb
PB
10350 } else {
10351 /* blx */
b0109805 10352 offset &= ~(uint32_t)2;
be5e7a76 10353 /* thumb2 bx, no need to check */
b0109805 10354 gen_bx_im(s, offset);
2c0262af 10355 }
9ee6e8bb
PB
10356 } else if (((insn >> 23) & 7) == 7) {
10357 /* Misc control */
10358 if (insn & (1 << 13))
10359 goto illegal_op;
10360
10361 if (insn & (1 << 26)) {
37e6456e
PM
10362 if (!(insn & (1 << 20))) {
10363 /* Hypervisor call (v7) */
10364 int imm16 = extract32(insn, 16, 4) << 12
10365 | extract32(insn, 0, 12);
10366 ARCH(7);
10367 if (IS_USER(s)) {
10368 goto illegal_op;
10369 }
10370 gen_hvc(s, imm16);
10371 } else {
10372 /* Secure monitor call (v6+) */
10373 ARCH(6K);
10374 if (IS_USER(s)) {
10375 goto illegal_op;
10376 }
10377 gen_smc(s);
10378 }
2c0262af 10379 } else {
9ee6e8bb
PB
10380 op = (insn >> 20) & 7;
10381 switch (op) {
10382 case 0: /* msr cpsr. */
b53d8923 10383 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10384 tmp = load_reg(s, rn);
10385 addr = tcg_const_i32(insn & 0xff);
10386 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10387 tcg_temp_free_i32(addr);
7d1b0095 10388 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10389 gen_lookup_tb(s);
10390 break;
10391 }
10392 /* fall through */
10393 case 1: /* msr spsr. */
b53d8923 10394 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10395 goto illegal_op;
b53d8923 10396 }
8bfd0550
PM
10397
10398 if (extract32(insn, 5, 1)) {
10399 /* MSR (banked) */
10400 int sysm = extract32(insn, 8, 4) |
10401 (extract32(insn, 4, 1) << 4);
10402 int r = op & 1;
10403
10404 gen_msr_banked(s, r, sysm, rm);
10405 break;
10406 }
10407
10408 /* MSR (for PSRs) */
2fbac54b
FN
10409 tmp = load_reg(s, rn);
10410 if (gen_set_psr(s,
7dcc1f89 10411 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10412 op == 1, tmp))
9ee6e8bb
PB
10413 goto illegal_op;
10414 break;
10415 case 2: /* cps, nop-hint. */
10416 if (((insn >> 8) & 7) == 0) {
10417 gen_nop_hint(s, insn & 0xff);
10418 }
10419 /* Implemented as NOP in user mode. */
10420 if (IS_USER(s))
10421 break;
10422 offset = 0;
10423 imm = 0;
10424 if (insn & (1 << 10)) {
10425 if (insn & (1 << 7))
10426 offset |= CPSR_A;
10427 if (insn & (1 << 6))
10428 offset |= CPSR_I;
10429 if (insn & (1 << 5))
10430 offset |= CPSR_F;
10431 if (insn & (1 << 9))
10432 imm = CPSR_A | CPSR_I | CPSR_F;
10433 }
10434 if (insn & (1 << 8)) {
10435 offset |= 0x1f;
10436 imm |= (insn & 0x1f);
10437 }
10438 if (offset) {
2fbac54b 10439 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10440 }
10441 break;
10442 case 3: /* Special control operations. */
426f5abc 10443 ARCH(7);
9ee6e8bb
PB
10444 op = (insn >> 4) & 0xf;
10445 switch (op) {
10446 case 2: /* clrex */
426f5abc 10447 gen_clrex(s);
9ee6e8bb
PB
10448 break;
10449 case 4: /* dsb */
10450 case 5: /* dmb */
61e4c432 10451 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10452 break;
6df99dec
SS
10453 case 6: /* isb */
10454 /* We need to break the TB after this insn
10455 * to execute self-modifying code correctly
10456 * and also to take any pending interrupts
10457 * immediately.
10458 */
10459 gen_lookup_tb(s);
10460 break;
9ee6e8bb
PB
10461 default:
10462 goto illegal_op;
10463 }
10464 break;
10465 case 4: /* bxj */
10466 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10467 tmp = load_reg(s, rn);
10468 gen_bx(s, tmp);
9ee6e8bb
PB
10469 break;
10470 case 5: /* Exception return. */
b8b45b68
RV
10471 if (IS_USER(s)) {
10472 goto illegal_op;
10473 }
10474 if (rn != 14 || rd != 15) {
10475 goto illegal_op;
10476 }
10477 tmp = load_reg(s, rn);
10478 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10479 gen_exception_return(s, tmp);
10480 break;
8bfd0550
PM
10481 case 6: /* MRS */
10482 if (extract32(insn, 5, 1)) {
10483 /* MRS (banked) */
10484 int sysm = extract32(insn, 16, 4) |
10485 (extract32(insn, 4, 1) << 4);
10486
10487 gen_mrs_banked(s, 0, sysm, rd);
10488 break;
10489 }
10490
10491 /* mrs cpsr */
7d1b0095 10492 tmp = tcg_temp_new_i32();
b53d8923 10493 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10494 addr = tcg_const_i32(insn & 0xff);
10495 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10496 tcg_temp_free_i32(addr);
9ee6e8bb 10497 } else {
9ef39277 10498 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10499 }
8984bd2e 10500 store_reg(s, rd, tmp);
9ee6e8bb 10501 break;
8bfd0550
PM
10502 case 7: /* MRS */
10503 if (extract32(insn, 5, 1)) {
10504 /* MRS (banked) */
10505 int sysm = extract32(insn, 16, 4) |
10506 (extract32(insn, 4, 1) << 4);
10507
10508 gen_mrs_banked(s, 1, sysm, rd);
10509 break;
10510 }
10511
10512 /* mrs spsr. */
9ee6e8bb 10513 /* Not accessible in user mode. */
b53d8923 10514 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10515 goto illegal_op;
b53d8923 10516 }
d9ba4830
PB
10517 tmp = load_cpu_field(spsr);
10518 store_reg(s, rd, tmp);
9ee6e8bb 10519 break;
2c0262af
FB
10520 }
10521 }
9ee6e8bb
PB
10522 } else {
10523 /* Conditional branch. */
10524 op = (insn >> 22) & 0xf;
10525 /* Generate a conditional jump to next instruction. */
10526 s->condlabel = gen_new_label();
39fb730a 10527 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10528 s->condjmp = 1;
10529
10530 /* offset[11:1] = insn[10:0] */
10531 offset = (insn & 0x7ff) << 1;
10532 /* offset[17:12] = insn[21:16]. */
10533 offset |= (insn & 0x003f0000) >> 4;
10534 /* offset[31:20] = insn[26]. */
10535 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10536 /* offset[18] = insn[13]. */
10537 offset |= (insn & (1 << 13)) << 5;
10538 /* offset[19] = insn[11]. */
10539 offset |= (insn & (1 << 11)) << 8;
10540
10541 /* jump to the offset */
b0109805 10542 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10543 }
10544 } else {
10545 /* Data processing immediate. */
10546 if (insn & (1 << 25)) {
10547 if (insn & (1 << 24)) {
10548 if (insn & (1 << 20))
10549 goto illegal_op;
10550 /* Bitfield/Saturate. */
10551 op = (insn >> 21) & 7;
10552 imm = insn & 0x1f;
10553 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10554 if (rn == 15) {
7d1b0095 10555 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10556 tcg_gen_movi_i32(tmp, 0);
10557 } else {
10558 tmp = load_reg(s, rn);
10559 }
9ee6e8bb
PB
10560 switch (op) {
10561 case 2: /* Signed bitfield extract. */
10562 imm++;
10563 if (shift + imm > 32)
10564 goto illegal_op;
10565 if (imm < 32)
6ddbc6e4 10566 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10567 break;
10568 case 6: /* Unsigned bitfield extract. */
10569 imm++;
10570 if (shift + imm > 32)
10571 goto illegal_op;
10572 if (imm < 32)
6ddbc6e4 10573 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10574 break;
10575 case 3: /* Bitfield insert/clear. */
10576 if (imm < shift)
10577 goto illegal_op;
10578 imm = imm + 1 - shift;
10579 if (imm != 32) {
6ddbc6e4 10580 tmp2 = load_reg(s, rd);
d593c48e 10581 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10582 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10583 }
10584 break;
10585 case 7:
10586 goto illegal_op;
10587 default: /* Saturate. */
9ee6e8bb
PB
10588 if (shift) {
10589 if (op & 1)
6ddbc6e4 10590 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10591 else
6ddbc6e4 10592 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10593 }
6ddbc6e4 10594 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10595 if (op & 4) {
10596 /* Unsigned. */
62b44f05
AR
10597 if ((op & 1) && shift == 0) {
10598 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10599 tcg_temp_free_i32(tmp);
10600 tcg_temp_free_i32(tmp2);
10601 goto illegal_op;
10602 }
9ef39277 10603 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10604 } else {
9ef39277 10605 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10606 }
2c0262af 10607 } else {
9ee6e8bb 10608 /* Signed. */
62b44f05
AR
10609 if ((op & 1) && shift == 0) {
10610 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10611 tcg_temp_free_i32(tmp);
10612 tcg_temp_free_i32(tmp2);
10613 goto illegal_op;
10614 }
9ef39277 10615 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10616 } else {
9ef39277 10617 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10618 }
2c0262af 10619 }
b75263d6 10620 tcg_temp_free_i32(tmp2);
9ee6e8bb 10621 break;
2c0262af 10622 }
6ddbc6e4 10623 store_reg(s, rd, tmp);
9ee6e8bb
PB
10624 } else {
10625 imm = ((insn & 0x04000000) >> 15)
10626 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10627 if (insn & (1 << 22)) {
10628 /* 16-bit immediate. */
10629 imm |= (insn >> 4) & 0xf000;
10630 if (insn & (1 << 23)) {
10631 /* movt */
5e3f878a 10632 tmp = load_reg(s, rd);
86831435 10633 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10634 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10635 } else {
9ee6e8bb 10636 /* movw */
7d1b0095 10637 tmp = tcg_temp_new_i32();
5e3f878a 10638 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10639 }
10640 } else {
9ee6e8bb
PB
10641 /* Add/sub 12-bit immediate. */
10642 if (rn == 15) {
b0109805 10643 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10644 if (insn & (1 << 23))
b0109805 10645 offset -= imm;
9ee6e8bb 10646 else
b0109805 10647 offset += imm;
7d1b0095 10648 tmp = tcg_temp_new_i32();
5e3f878a 10649 tcg_gen_movi_i32(tmp, offset);
2c0262af 10650 } else {
5e3f878a 10651 tmp = load_reg(s, rn);
9ee6e8bb 10652 if (insn & (1 << 23))
5e3f878a 10653 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10654 else
5e3f878a 10655 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10656 }
9ee6e8bb 10657 }
5e3f878a 10658 store_reg(s, rd, tmp);
191abaa2 10659 }
9ee6e8bb
PB
10660 } else {
10661 int shifter_out = 0;
10662 /* modified 12-bit immediate. */
10663 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10664 imm = (insn & 0xff);
10665 switch (shift) {
10666 case 0: /* XY */
10667 /* Nothing to do. */
10668 break;
10669 case 1: /* 00XY00XY */
10670 imm |= imm << 16;
10671 break;
10672 case 2: /* XY00XY00 */
10673 imm |= imm << 16;
10674 imm <<= 8;
10675 break;
10676 case 3: /* XYXYXYXY */
10677 imm |= imm << 16;
10678 imm |= imm << 8;
10679 break;
10680 default: /* Rotated constant. */
10681 shift = (shift << 1) | (imm >> 7);
10682 imm |= 0x80;
10683 imm = imm << (32 - shift);
10684 shifter_out = 1;
10685 break;
b5ff1b31 10686 }
7d1b0095 10687 tmp2 = tcg_temp_new_i32();
3174f8e9 10688 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10689 rn = (insn >> 16) & 0xf;
3174f8e9 10690 if (rn == 15) {
7d1b0095 10691 tmp = tcg_temp_new_i32();
3174f8e9
FN
10692 tcg_gen_movi_i32(tmp, 0);
10693 } else {
10694 tmp = load_reg(s, rn);
10695 }
9ee6e8bb
PB
10696 op = (insn >> 21) & 0xf;
10697 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10698 shifter_out, tmp, tmp2))
9ee6e8bb 10699 goto illegal_op;
7d1b0095 10700 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10701 rd = (insn >> 8) & 0xf;
10702 if (rd != 15) {
3174f8e9
FN
10703 store_reg(s, rd, tmp);
10704 } else {
7d1b0095 10705 tcg_temp_free_i32(tmp);
2c0262af 10706 }
2c0262af 10707 }
9ee6e8bb
PB
10708 }
10709 break;
10710 case 12: /* Load/store single data item. */
10711 {
10712 int postinc = 0;
10713 int writeback = 0;
a99caa48 10714 int memidx;
9ee6e8bb 10715 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10716 if (disas_neon_ls_insn(s, insn)) {
c1713132 10717 goto illegal_op;
7dcc1f89 10718 }
9ee6e8bb
PB
10719 break;
10720 }
a2fdc890
PM
10721 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10722 if (rs == 15) {
10723 if (!(insn & (1 << 20))) {
10724 goto illegal_op;
10725 }
10726 if (op != 2) {
10727 /* Byte or halfword load space with dest == r15 : memory hints.
10728 * Catch them early so we don't emit pointless addressing code.
10729 * This space is a mix of:
10730 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10731 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10732 * cores)
10733 * unallocated hints, which must be treated as NOPs
10734 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10735 * which is easiest for the decoding logic
10736 * Some space which must UNDEF
10737 */
10738 int op1 = (insn >> 23) & 3;
10739 int op2 = (insn >> 6) & 0x3f;
10740 if (op & 2) {
10741 goto illegal_op;
10742 }
10743 if (rn == 15) {
02afbf64
PM
10744 /* UNPREDICTABLE, unallocated hint or
10745 * PLD/PLDW/PLI (literal)
10746 */
a2fdc890
PM
10747 return 0;
10748 }
10749 if (op1 & 1) {
02afbf64 10750 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10751 }
10752 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10753 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10754 }
10755 /* UNDEF space, or an UNPREDICTABLE */
10756 return 1;
10757 }
10758 }
a99caa48 10759 memidx = get_mem_index(s);
9ee6e8bb 10760 if (rn == 15) {
7d1b0095 10761 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10762 /* PC relative. */
10763 /* s->pc has already been incremented by 4. */
10764 imm = s->pc & 0xfffffffc;
10765 if (insn & (1 << 23))
10766 imm += insn & 0xfff;
10767 else
10768 imm -= insn & 0xfff;
b0109805 10769 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10770 } else {
b0109805 10771 addr = load_reg(s, rn);
9ee6e8bb
PB
10772 if (insn & (1 << 23)) {
10773 /* Positive offset. */
10774 imm = insn & 0xfff;
b0109805 10775 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10776 } else {
9ee6e8bb 10777 imm = insn & 0xff;
2a0308c5
PM
10778 switch ((insn >> 8) & 0xf) {
10779 case 0x0: /* Shifted Register. */
9ee6e8bb 10780 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10781 if (shift > 3) {
10782 tcg_temp_free_i32(addr);
18c9b560 10783 goto illegal_op;
2a0308c5 10784 }
b26eefb6 10785 tmp = load_reg(s, rm);
9ee6e8bb 10786 if (shift)
b26eefb6 10787 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10788 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10789 tcg_temp_free_i32(tmp);
9ee6e8bb 10790 break;
2a0308c5 10791 case 0xc: /* Negative offset. */
b0109805 10792 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10793 break;
2a0308c5 10794 case 0xe: /* User privilege. */
b0109805 10795 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10796 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10797 break;
2a0308c5 10798 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10799 imm = -imm;
10800 /* Fall through. */
2a0308c5 10801 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10802 postinc = 1;
10803 writeback = 1;
10804 break;
2a0308c5 10805 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10806 imm = -imm;
10807 /* Fall through. */
2a0308c5 10808 case 0xf: /* Pre-increment. */
b0109805 10809 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10810 writeback = 1;
10811 break;
10812 default:
2a0308c5 10813 tcg_temp_free_i32(addr);
b7bcbe95 10814 goto illegal_op;
9ee6e8bb
PB
10815 }
10816 }
10817 }
9ee6e8bb
PB
10818 if (insn & (1 << 20)) {
10819 /* Load. */
5a839c0d 10820 tmp = tcg_temp_new_i32();
a2fdc890 10821 switch (op) {
5a839c0d 10822 case 0:
12dcc321 10823 gen_aa32_ld8u(s, tmp, addr, memidx);
5a839c0d
PM
10824 break;
10825 case 4:
12dcc321 10826 gen_aa32_ld8s(s, tmp, addr, memidx);
5a839c0d
PM
10827 break;
10828 case 1:
12dcc321 10829 gen_aa32_ld16u(s, tmp, addr, memidx);
5a839c0d
PM
10830 break;
10831 case 5:
12dcc321 10832 gen_aa32_ld16s(s, tmp, addr, memidx);
5a839c0d
PM
10833 break;
10834 case 2:
12dcc321 10835 gen_aa32_ld32u(s, tmp, addr, memidx);
5a839c0d 10836 break;
2a0308c5 10837 default:
5a839c0d 10838 tcg_temp_free_i32(tmp);
2a0308c5
PM
10839 tcg_temp_free_i32(addr);
10840 goto illegal_op;
a2fdc890
PM
10841 }
10842 if (rs == 15) {
10843 gen_bx(s, tmp);
9ee6e8bb 10844 } else {
a2fdc890 10845 store_reg(s, rs, tmp);
9ee6e8bb
PB
10846 }
10847 } else {
10848 /* Store. */
b0109805 10849 tmp = load_reg(s, rs);
9ee6e8bb 10850 switch (op) {
5a839c0d 10851 case 0:
12dcc321 10852 gen_aa32_st8(s, tmp, addr, memidx);
5a839c0d
PM
10853 break;
10854 case 1:
12dcc321 10855 gen_aa32_st16(s, tmp, addr, memidx);
5a839c0d
PM
10856 break;
10857 case 2:
12dcc321 10858 gen_aa32_st32(s, tmp, addr, memidx);
5a839c0d 10859 break;
2a0308c5 10860 default:
5a839c0d 10861 tcg_temp_free_i32(tmp);
2a0308c5
PM
10862 tcg_temp_free_i32(addr);
10863 goto illegal_op;
b7bcbe95 10864 }
5a839c0d 10865 tcg_temp_free_i32(tmp);
2c0262af 10866 }
9ee6e8bb 10867 if (postinc)
b0109805
PB
10868 tcg_gen_addi_i32(addr, addr, imm);
10869 if (writeback) {
10870 store_reg(s, rn, addr);
10871 } else {
7d1b0095 10872 tcg_temp_free_i32(addr);
b0109805 10873 }
9ee6e8bb
PB
10874 }
10875 break;
10876 default:
10877 goto illegal_op;
2c0262af 10878 }
9ee6e8bb
PB
10879 return 0;
10880illegal_op:
10881 return 1;
2c0262af
FB
10882}
10883
0ecb72a5 10884static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10885{
10886 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10887 int32_t offset;
10888 int i;
39d5492a
PM
10889 TCGv_i32 tmp;
10890 TCGv_i32 tmp2;
10891 TCGv_i32 addr;
99c475ab 10892
9ee6e8bb
PB
10893 if (s->condexec_mask) {
10894 cond = s->condexec_cond;
bedd2912
JB
10895 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10896 s->condlabel = gen_new_label();
39fb730a 10897 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10898 s->condjmp = 1;
10899 }
9ee6e8bb
PB
10900 }
10901
f9fd40eb 10902 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 10903 s->pc += 2;
b5ff1b31 10904
99c475ab
FB
10905 switch (insn >> 12) {
10906 case 0: case 1:
396e467c 10907
99c475ab
FB
10908 rd = insn & 7;
10909 op = (insn >> 11) & 3;
10910 if (op == 3) {
10911 /* add/subtract */
10912 rn = (insn >> 3) & 7;
396e467c 10913 tmp = load_reg(s, rn);
99c475ab
FB
10914 if (insn & (1 << 10)) {
10915 /* immediate */
7d1b0095 10916 tmp2 = tcg_temp_new_i32();
396e467c 10917 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10918 } else {
10919 /* reg */
10920 rm = (insn >> 6) & 7;
396e467c 10921 tmp2 = load_reg(s, rm);
99c475ab 10922 }
9ee6e8bb
PB
10923 if (insn & (1 << 9)) {
10924 if (s->condexec_mask)
396e467c 10925 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10926 else
72485ec4 10927 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10928 } else {
10929 if (s->condexec_mask)
396e467c 10930 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10931 else
72485ec4 10932 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10933 }
7d1b0095 10934 tcg_temp_free_i32(tmp2);
396e467c 10935 store_reg(s, rd, tmp);
99c475ab
FB
10936 } else {
10937 /* shift immediate */
10938 rm = (insn >> 3) & 7;
10939 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10940 tmp = load_reg(s, rm);
10941 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10942 if (!s->condexec_mask)
10943 gen_logic_CC(tmp);
10944 store_reg(s, rd, tmp);
99c475ab
FB
10945 }
10946 break;
10947 case 2: case 3:
10948 /* arithmetic large immediate */
10949 op = (insn >> 11) & 3;
10950 rd = (insn >> 8) & 0x7;
396e467c 10951 if (op == 0) { /* mov */
7d1b0095 10952 tmp = tcg_temp_new_i32();
396e467c 10953 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10954 if (!s->condexec_mask)
396e467c
FN
10955 gen_logic_CC(tmp);
10956 store_reg(s, rd, tmp);
10957 } else {
10958 tmp = load_reg(s, rd);
7d1b0095 10959 tmp2 = tcg_temp_new_i32();
396e467c
FN
10960 tcg_gen_movi_i32(tmp2, insn & 0xff);
10961 switch (op) {
10962 case 1: /* cmp */
72485ec4 10963 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10964 tcg_temp_free_i32(tmp);
10965 tcg_temp_free_i32(tmp2);
396e467c
FN
10966 break;
10967 case 2: /* add */
10968 if (s->condexec_mask)
10969 tcg_gen_add_i32(tmp, tmp, tmp2);
10970 else
72485ec4 10971 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10972 tcg_temp_free_i32(tmp2);
396e467c
FN
10973 store_reg(s, rd, tmp);
10974 break;
10975 case 3: /* sub */
10976 if (s->condexec_mask)
10977 tcg_gen_sub_i32(tmp, tmp, tmp2);
10978 else
72485ec4 10979 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10980 tcg_temp_free_i32(tmp2);
396e467c
FN
10981 store_reg(s, rd, tmp);
10982 break;
10983 }
99c475ab 10984 }
99c475ab
FB
10985 break;
10986 case 4:
10987 if (insn & (1 << 11)) {
10988 rd = (insn >> 8) & 7;
5899f386
FB
10989 /* load pc-relative. Bit 1 of PC is ignored. */
10990 val = s->pc + 2 + ((insn & 0xff) * 4);
10991 val &= ~(uint32_t)2;
7d1b0095 10992 addr = tcg_temp_new_i32();
b0109805 10993 tcg_gen_movi_i32(addr, val);
c40c8556 10994 tmp = tcg_temp_new_i32();
12dcc321 10995 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7d1b0095 10996 tcg_temp_free_i32(addr);
b0109805 10997 store_reg(s, rd, tmp);
99c475ab
FB
10998 break;
10999 }
11000 if (insn & (1 << 10)) {
11001 /* data processing extended or blx */
11002 rd = (insn & 7) | ((insn >> 4) & 8);
11003 rm = (insn >> 3) & 0xf;
11004 op = (insn >> 8) & 3;
11005 switch (op) {
11006 case 0: /* add */
396e467c
FN
11007 tmp = load_reg(s, rd);
11008 tmp2 = load_reg(s, rm);
11009 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11010 tcg_temp_free_i32(tmp2);
396e467c 11011 store_reg(s, rd, tmp);
99c475ab
FB
11012 break;
11013 case 1: /* cmp */
396e467c
FN
11014 tmp = load_reg(s, rd);
11015 tmp2 = load_reg(s, rm);
72485ec4 11016 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11017 tcg_temp_free_i32(tmp2);
11018 tcg_temp_free_i32(tmp);
99c475ab
FB
11019 break;
11020 case 2: /* mov/cpy */
396e467c
FN
11021 tmp = load_reg(s, rm);
11022 store_reg(s, rd, tmp);
99c475ab
FB
11023 break;
11024 case 3:/* branch [and link] exchange thumb register */
b0109805 11025 tmp = load_reg(s, rm);
99c475ab 11026 if (insn & (1 << 7)) {
be5e7a76 11027 ARCH(5);
99c475ab 11028 val = (uint32_t)s->pc | 1;
7d1b0095 11029 tmp2 = tcg_temp_new_i32();
b0109805
PB
11030 tcg_gen_movi_i32(tmp2, val);
11031 store_reg(s, 14, tmp2);
99c475ab 11032 }
be5e7a76 11033 /* already thumb, no need to check */
d9ba4830 11034 gen_bx(s, tmp);
99c475ab
FB
11035 break;
11036 }
11037 break;
11038 }
11039
11040 /* data processing register */
11041 rd = insn & 7;
11042 rm = (insn >> 3) & 7;
11043 op = (insn >> 6) & 0xf;
11044 if (op == 2 || op == 3 || op == 4 || op == 7) {
11045 /* the shift/rotate ops want the operands backwards */
11046 val = rm;
11047 rm = rd;
11048 rd = val;
11049 val = 1;
11050 } else {
11051 val = 0;
11052 }
11053
396e467c 11054 if (op == 9) { /* neg */
7d1b0095 11055 tmp = tcg_temp_new_i32();
396e467c
FN
11056 tcg_gen_movi_i32(tmp, 0);
11057 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11058 tmp = load_reg(s, rd);
11059 } else {
39d5492a 11060 TCGV_UNUSED_I32(tmp);
396e467c 11061 }
99c475ab 11062
396e467c 11063 tmp2 = load_reg(s, rm);
5899f386 11064 switch (op) {
99c475ab 11065 case 0x0: /* and */
396e467c 11066 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11067 if (!s->condexec_mask)
396e467c 11068 gen_logic_CC(tmp);
99c475ab
FB
11069 break;
11070 case 0x1: /* eor */
396e467c 11071 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11072 if (!s->condexec_mask)
396e467c 11073 gen_logic_CC(tmp);
99c475ab
FB
11074 break;
11075 case 0x2: /* lsl */
9ee6e8bb 11076 if (s->condexec_mask) {
365af80e 11077 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11078 } else {
9ef39277 11079 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11080 gen_logic_CC(tmp2);
9ee6e8bb 11081 }
99c475ab
FB
11082 break;
11083 case 0x3: /* lsr */
9ee6e8bb 11084 if (s->condexec_mask) {
365af80e 11085 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11086 } else {
9ef39277 11087 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11088 gen_logic_CC(tmp2);
9ee6e8bb 11089 }
99c475ab
FB
11090 break;
11091 case 0x4: /* asr */
9ee6e8bb 11092 if (s->condexec_mask) {
365af80e 11093 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11094 } else {
9ef39277 11095 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11096 gen_logic_CC(tmp2);
9ee6e8bb 11097 }
99c475ab
FB
11098 break;
11099 case 0x5: /* adc */
49b4c31e 11100 if (s->condexec_mask) {
396e467c 11101 gen_adc(tmp, tmp2);
49b4c31e
RH
11102 } else {
11103 gen_adc_CC(tmp, tmp, tmp2);
11104 }
99c475ab
FB
11105 break;
11106 case 0x6: /* sbc */
2de68a49 11107 if (s->condexec_mask) {
396e467c 11108 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11109 } else {
11110 gen_sbc_CC(tmp, tmp, tmp2);
11111 }
99c475ab
FB
11112 break;
11113 case 0x7: /* ror */
9ee6e8bb 11114 if (s->condexec_mask) {
f669df27
AJ
11115 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11116 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11117 } else {
9ef39277 11118 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11119 gen_logic_CC(tmp2);
9ee6e8bb 11120 }
99c475ab
FB
11121 break;
11122 case 0x8: /* tst */
396e467c
FN
11123 tcg_gen_and_i32(tmp, tmp, tmp2);
11124 gen_logic_CC(tmp);
99c475ab 11125 rd = 16;
5899f386 11126 break;
99c475ab 11127 case 0x9: /* neg */
9ee6e8bb 11128 if (s->condexec_mask)
396e467c 11129 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11130 else
72485ec4 11131 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11132 break;
11133 case 0xa: /* cmp */
72485ec4 11134 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11135 rd = 16;
11136 break;
11137 case 0xb: /* cmn */
72485ec4 11138 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11139 rd = 16;
11140 break;
11141 case 0xc: /* orr */
396e467c 11142 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11143 if (!s->condexec_mask)
396e467c 11144 gen_logic_CC(tmp);
99c475ab
FB
11145 break;
11146 case 0xd: /* mul */
7b2919a0 11147 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11148 if (!s->condexec_mask)
396e467c 11149 gen_logic_CC(tmp);
99c475ab
FB
11150 break;
11151 case 0xe: /* bic */
f669df27 11152 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11153 if (!s->condexec_mask)
396e467c 11154 gen_logic_CC(tmp);
99c475ab
FB
11155 break;
11156 case 0xf: /* mvn */
396e467c 11157 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11158 if (!s->condexec_mask)
396e467c 11159 gen_logic_CC(tmp2);
99c475ab 11160 val = 1;
5899f386 11161 rm = rd;
99c475ab
FB
11162 break;
11163 }
11164 if (rd != 16) {
396e467c
FN
11165 if (val) {
11166 store_reg(s, rm, tmp2);
11167 if (op != 0xf)
7d1b0095 11168 tcg_temp_free_i32(tmp);
396e467c
FN
11169 } else {
11170 store_reg(s, rd, tmp);
7d1b0095 11171 tcg_temp_free_i32(tmp2);
396e467c
FN
11172 }
11173 } else {
7d1b0095
PM
11174 tcg_temp_free_i32(tmp);
11175 tcg_temp_free_i32(tmp2);
99c475ab
FB
11176 }
11177 break;
11178
11179 case 5:
11180 /* load/store register offset. */
11181 rd = insn & 7;
11182 rn = (insn >> 3) & 7;
11183 rm = (insn >> 6) & 7;
11184 op = (insn >> 9) & 7;
b0109805 11185 addr = load_reg(s, rn);
b26eefb6 11186 tmp = load_reg(s, rm);
b0109805 11187 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11188 tcg_temp_free_i32(tmp);
99c475ab 11189
c40c8556 11190 if (op < 3) { /* store */
b0109805 11191 tmp = load_reg(s, rd);
c40c8556
PM
11192 } else {
11193 tmp = tcg_temp_new_i32();
11194 }
99c475ab
FB
11195
11196 switch (op) {
11197 case 0: /* str */
12dcc321 11198 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11199 break;
11200 case 1: /* strh */
12dcc321 11201 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11202 break;
11203 case 2: /* strb */
12dcc321 11204 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11205 break;
11206 case 3: /* ldrsb */
12dcc321 11207 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11208 break;
11209 case 4: /* ldr */
12dcc321 11210 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11211 break;
11212 case 5: /* ldrh */
12dcc321 11213 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11214 break;
11215 case 6: /* ldrb */
12dcc321 11216 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11217 break;
11218 case 7: /* ldrsh */
12dcc321 11219 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11220 break;
11221 }
c40c8556 11222 if (op >= 3) { /* load */
b0109805 11223 store_reg(s, rd, tmp);
c40c8556
PM
11224 } else {
11225 tcg_temp_free_i32(tmp);
11226 }
7d1b0095 11227 tcg_temp_free_i32(addr);
99c475ab
FB
11228 break;
11229
11230 case 6:
11231 /* load/store word immediate offset */
11232 rd = insn & 7;
11233 rn = (insn >> 3) & 7;
b0109805 11234 addr = load_reg(s, rn);
99c475ab 11235 val = (insn >> 4) & 0x7c;
b0109805 11236 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11237
11238 if (insn & (1 << 11)) {
11239 /* load */
c40c8556 11240 tmp = tcg_temp_new_i32();
12dcc321 11241 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11242 store_reg(s, rd, tmp);
99c475ab
FB
11243 } else {
11244 /* store */
b0109805 11245 tmp = load_reg(s, rd);
12dcc321 11246 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11247 tcg_temp_free_i32(tmp);
99c475ab 11248 }
7d1b0095 11249 tcg_temp_free_i32(addr);
99c475ab
FB
11250 break;
11251
11252 case 7:
11253 /* load/store byte immediate offset */
11254 rd = insn & 7;
11255 rn = (insn >> 3) & 7;
b0109805 11256 addr = load_reg(s, rn);
99c475ab 11257 val = (insn >> 6) & 0x1f;
b0109805 11258 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11259
11260 if (insn & (1 << 11)) {
11261 /* load */
c40c8556 11262 tmp = tcg_temp_new_i32();
12dcc321 11263 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
b0109805 11264 store_reg(s, rd, tmp);
99c475ab
FB
11265 } else {
11266 /* store */
b0109805 11267 tmp = load_reg(s, rd);
12dcc321 11268 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
c40c8556 11269 tcg_temp_free_i32(tmp);
99c475ab 11270 }
7d1b0095 11271 tcg_temp_free_i32(addr);
99c475ab
FB
11272 break;
11273
11274 case 8:
11275 /* load/store halfword immediate offset */
11276 rd = insn & 7;
11277 rn = (insn >> 3) & 7;
b0109805 11278 addr = load_reg(s, rn);
99c475ab 11279 val = (insn >> 5) & 0x3e;
b0109805 11280 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11281
11282 if (insn & (1 << 11)) {
11283 /* load */
c40c8556 11284 tmp = tcg_temp_new_i32();
12dcc321 11285 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
b0109805 11286 store_reg(s, rd, tmp);
99c475ab
FB
11287 } else {
11288 /* store */
b0109805 11289 tmp = load_reg(s, rd);
12dcc321 11290 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
c40c8556 11291 tcg_temp_free_i32(tmp);
99c475ab 11292 }
7d1b0095 11293 tcg_temp_free_i32(addr);
99c475ab
FB
11294 break;
11295
11296 case 9:
11297 /* load/store from stack */
11298 rd = (insn >> 8) & 7;
b0109805 11299 addr = load_reg(s, 13);
99c475ab 11300 val = (insn & 0xff) * 4;
b0109805 11301 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11302
11303 if (insn & (1 << 11)) {
11304 /* load */
c40c8556 11305 tmp = tcg_temp_new_i32();
12dcc321 11306 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11307 store_reg(s, rd, tmp);
99c475ab
FB
11308 } else {
11309 /* store */
b0109805 11310 tmp = load_reg(s, rd);
12dcc321 11311 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11312 tcg_temp_free_i32(tmp);
99c475ab 11313 }
7d1b0095 11314 tcg_temp_free_i32(addr);
99c475ab
FB
11315 break;
11316
11317 case 10:
11318 /* add to high reg */
11319 rd = (insn >> 8) & 7;
5899f386
FB
11320 if (insn & (1 << 11)) {
11321 /* SP */
5e3f878a 11322 tmp = load_reg(s, 13);
5899f386
FB
11323 } else {
11324 /* PC. bit 1 is ignored. */
7d1b0095 11325 tmp = tcg_temp_new_i32();
5e3f878a 11326 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11327 }
99c475ab 11328 val = (insn & 0xff) * 4;
5e3f878a
PB
11329 tcg_gen_addi_i32(tmp, tmp, val);
11330 store_reg(s, rd, tmp);
99c475ab
FB
11331 break;
11332
11333 case 11:
11334 /* misc */
11335 op = (insn >> 8) & 0xf;
11336 switch (op) {
11337 case 0:
11338 /* adjust stack pointer */
b26eefb6 11339 tmp = load_reg(s, 13);
99c475ab
FB
11340 val = (insn & 0x7f) * 4;
11341 if (insn & (1 << 7))
6a0d8a1d 11342 val = -(int32_t)val;
b26eefb6
PB
11343 tcg_gen_addi_i32(tmp, tmp, val);
11344 store_reg(s, 13, tmp);
99c475ab
FB
11345 break;
11346
9ee6e8bb
PB
11347 case 2: /* sign/zero extend. */
11348 ARCH(6);
11349 rd = insn & 7;
11350 rm = (insn >> 3) & 7;
b0109805 11351 tmp = load_reg(s, rm);
9ee6e8bb 11352 switch ((insn >> 6) & 3) {
b0109805
PB
11353 case 0: gen_sxth(tmp); break;
11354 case 1: gen_sxtb(tmp); break;
11355 case 2: gen_uxth(tmp); break;
11356 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11357 }
b0109805 11358 store_reg(s, rd, tmp);
9ee6e8bb 11359 break;
99c475ab
FB
11360 case 4: case 5: case 0xc: case 0xd:
11361 /* push/pop */
b0109805 11362 addr = load_reg(s, 13);
5899f386
FB
11363 if (insn & (1 << 8))
11364 offset = 4;
99c475ab 11365 else
5899f386
FB
11366 offset = 0;
11367 for (i = 0; i < 8; i++) {
11368 if (insn & (1 << i))
11369 offset += 4;
11370 }
11371 if ((insn & (1 << 11)) == 0) {
b0109805 11372 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11373 }
99c475ab
FB
11374 for (i = 0; i < 8; i++) {
11375 if (insn & (1 << i)) {
11376 if (insn & (1 << 11)) {
11377 /* pop */
c40c8556 11378 tmp = tcg_temp_new_i32();
12dcc321 11379 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11380 store_reg(s, i, tmp);
99c475ab
FB
11381 } else {
11382 /* push */
b0109805 11383 tmp = load_reg(s, i);
12dcc321 11384 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11385 tcg_temp_free_i32(tmp);
99c475ab 11386 }
5899f386 11387 /* advance to the next address. */
b0109805 11388 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11389 }
11390 }
39d5492a 11391 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11392 if (insn & (1 << 8)) {
11393 if (insn & (1 << 11)) {
11394 /* pop pc */
c40c8556 11395 tmp = tcg_temp_new_i32();
12dcc321 11396 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11397 /* don't set the pc until the rest of the instruction
11398 has completed */
11399 } else {
11400 /* push lr */
b0109805 11401 tmp = load_reg(s, 14);
12dcc321 11402 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11403 tcg_temp_free_i32(tmp);
99c475ab 11404 }
b0109805 11405 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11406 }
5899f386 11407 if ((insn & (1 << 11)) == 0) {
b0109805 11408 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11409 }
99c475ab 11410 /* write back the new stack pointer */
b0109805 11411 store_reg(s, 13, addr);
99c475ab 11412 /* set the new PC value */
be5e7a76 11413 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11414 store_reg_from_load(s, 15, tmp);
be5e7a76 11415 }
99c475ab
FB
11416 break;
11417
9ee6e8bb
PB
11418 case 1: case 3: case 9: case 11: /* czb */
11419 rm = insn & 7;
d9ba4830 11420 tmp = load_reg(s, rm);
9ee6e8bb
PB
11421 s->condlabel = gen_new_label();
11422 s->condjmp = 1;
11423 if (insn & (1 << 11))
cb63669a 11424 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11425 else
cb63669a 11426 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11427 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11428 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11429 val = (uint32_t)s->pc + 2;
11430 val += offset;
11431 gen_jmp(s, val);
11432 break;
11433
11434 case 15: /* IT, nop-hint. */
11435 if ((insn & 0xf) == 0) {
11436 gen_nop_hint(s, (insn >> 4) & 0xf);
11437 break;
11438 }
11439 /* If Then. */
11440 s->condexec_cond = (insn >> 4) & 0xe;
11441 s->condexec_mask = insn & 0x1f;
11442 /* No actual code generated for this insn, just setup state. */
11443 break;
11444
06c949e6 11445 case 0xe: /* bkpt */
d4a2dc67
PM
11446 {
11447 int imm8 = extract32(insn, 0, 8);
be5e7a76 11448 ARCH(5);
73710361
GB
11449 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11450 default_exception_el(s));
06c949e6 11451 break;
d4a2dc67 11452 }
06c949e6 11453
9ee6e8bb
PB
11454 case 0xa: /* rev */
11455 ARCH(6);
11456 rn = (insn >> 3) & 0x7;
11457 rd = insn & 0x7;
b0109805 11458 tmp = load_reg(s, rn);
9ee6e8bb 11459 switch ((insn >> 6) & 3) {
66896cb8 11460 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11461 case 1: gen_rev16(tmp); break;
11462 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
11463 default: goto illegal_op;
11464 }
b0109805 11465 store_reg(s, rd, tmp);
9ee6e8bb
PB
11466 break;
11467
d9e028c1
PM
11468 case 6:
11469 switch ((insn >> 5) & 7) {
11470 case 2:
11471 /* setend */
11472 ARCH(6);
9886ecdf
PB
11473 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11474 gen_helper_setend(cpu_env);
11475 s->is_jmp = DISAS_UPDATE;
d9e028c1 11476 }
9ee6e8bb 11477 break;
d9e028c1
PM
11478 case 3:
11479 /* cps */
11480 ARCH(6);
11481 if (IS_USER(s)) {
11482 break;
8984bd2e 11483 }
b53d8923 11484 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11485 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11486 /* FAULTMASK */
11487 if (insn & 1) {
11488 addr = tcg_const_i32(19);
11489 gen_helper_v7m_msr(cpu_env, addr, tmp);
11490 tcg_temp_free_i32(addr);
11491 }
11492 /* PRIMASK */
11493 if (insn & 2) {
11494 addr = tcg_const_i32(16);
11495 gen_helper_v7m_msr(cpu_env, addr, tmp);
11496 tcg_temp_free_i32(addr);
11497 }
11498 tcg_temp_free_i32(tmp);
11499 gen_lookup_tb(s);
11500 } else {
11501 if (insn & (1 << 4)) {
11502 shift = CPSR_A | CPSR_I | CPSR_F;
11503 } else {
11504 shift = 0;
11505 }
11506 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11507 }
d9e028c1
PM
11508 break;
11509 default:
11510 goto undef;
9ee6e8bb
PB
11511 }
11512 break;
11513
99c475ab
FB
11514 default:
11515 goto undef;
11516 }
11517 break;
11518
11519 case 12:
a7d3970d 11520 {
99c475ab 11521 /* load/store multiple */
39d5492a
PM
11522 TCGv_i32 loaded_var;
11523 TCGV_UNUSED_I32(loaded_var);
99c475ab 11524 rn = (insn >> 8) & 0x7;
b0109805 11525 addr = load_reg(s, rn);
99c475ab
FB
11526 for (i = 0; i < 8; i++) {
11527 if (insn & (1 << i)) {
99c475ab
FB
11528 if (insn & (1 << 11)) {
11529 /* load */
c40c8556 11530 tmp = tcg_temp_new_i32();
12dcc321 11531 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11532 if (i == rn) {
11533 loaded_var = tmp;
11534 } else {
11535 store_reg(s, i, tmp);
11536 }
99c475ab
FB
11537 } else {
11538 /* store */
b0109805 11539 tmp = load_reg(s, i);
12dcc321 11540 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11541 tcg_temp_free_i32(tmp);
99c475ab 11542 }
5899f386 11543 /* advance to the next address */
b0109805 11544 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11545 }
11546 }
b0109805 11547 if ((insn & (1 << rn)) == 0) {
a7d3970d 11548 /* base reg not in list: base register writeback */
b0109805
PB
11549 store_reg(s, rn, addr);
11550 } else {
a7d3970d
PM
11551 /* base reg in list: if load, complete it now */
11552 if (insn & (1 << 11)) {
11553 store_reg(s, rn, loaded_var);
11554 }
7d1b0095 11555 tcg_temp_free_i32(addr);
b0109805 11556 }
99c475ab 11557 break;
a7d3970d 11558 }
99c475ab
FB
11559 case 13:
11560 /* conditional branch or swi */
11561 cond = (insn >> 8) & 0xf;
11562 if (cond == 0xe)
11563 goto undef;
11564
11565 if (cond == 0xf) {
11566 /* swi */
eaed129d 11567 gen_set_pc_im(s, s->pc);
d4a2dc67 11568 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11569 s->is_jmp = DISAS_SWI;
99c475ab
FB
11570 break;
11571 }
11572 /* generate a conditional jump to next instruction */
e50e6a20 11573 s->condlabel = gen_new_label();
39fb730a 11574 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11575 s->condjmp = 1;
99c475ab
FB
11576
11577 /* jump to the offset */
5899f386 11578 val = (uint32_t)s->pc + 2;
99c475ab 11579 offset = ((int32_t)insn << 24) >> 24;
5899f386 11580 val += offset << 1;
8aaca4c0 11581 gen_jmp(s, val);
99c475ab
FB
11582 break;
11583
11584 case 14:
358bf29e 11585 if (insn & (1 << 11)) {
9ee6e8bb
PB
11586 if (disas_thumb2_insn(env, s, insn))
11587 goto undef32;
358bf29e
PB
11588 break;
11589 }
9ee6e8bb 11590 /* unconditional branch */
99c475ab
FB
11591 val = (uint32_t)s->pc;
11592 offset = ((int32_t)insn << 21) >> 21;
11593 val += (offset << 1) + 2;
8aaca4c0 11594 gen_jmp(s, val);
99c475ab
FB
11595 break;
11596
11597 case 15:
9ee6e8bb 11598 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11599 goto undef32;
9ee6e8bb 11600 break;
99c475ab
FB
11601 }
11602 return;
9ee6e8bb 11603undef32:
73710361
GB
11604 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11605 default_exception_el(s));
9ee6e8bb
PB
11606 return;
11607illegal_op:
99c475ab 11608undef:
73710361
GB
11609 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11610 default_exception_el(s));
99c475ab
FB
11611}
11612
541ebcd4
PM
11613static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11614{
11615 /* Return true if the insn at dc->pc might cross a page boundary.
11616 * (False positives are OK, false negatives are not.)
11617 */
11618 uint16_t insn;
11619
11620 if ((s->pc & 3) == 0) {
11621 /* At a 4-aligned address we can't be crossing a page */
11622 return false;
11623 }
11624
11625 /* This must be a Thumb insn */
f9fd40eb 11626 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11627
11628 if ((insn >> 11) >= 0x1d) {
11629 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11630 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11631 * end up actually treating this as two 16-bit insns (see the
11632 * code at the start of disas_thumb2_insn()) but we don't bother
11633 * to check for that as it is unlikely, and false positives here
11634 * are harmless.
11635 */
11636 return true;
11637 }
11638 /* Definitely a 16-bit insn, can't be crossing a page. */
11639 return false;
11640}
11641
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    /* Populate the per-TB translator state from the TB flags. */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    /* Allocate the TCG temporaries shared by the VFP/Neon decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
        /* Record PC and condexec bits so restore_state_to_opc() can
         * rebuild them after an unexpected exit (see the note above).
         */
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                           0);
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing.  */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine after each Thumb insn. */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
        qemu_log_in_addr_range(pc_start)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->sctlr_b << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
12027
b5ff1b31 12028static const char *cpu_mode_names[16] = {
28c9457d
EI
12029 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12030 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12031};
9ee6e8bb 12032
878096ee
AF
12033void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12034 int flags)
2c0262af 12035{
878096ee
AF
12036 ARMCPU *cpu = ARM_CPU(cs);
12037 CPUARMState *env = &cpu->env;
2c0262af 12038 int i;
b5ff1b31 12039 uint32_t psr;
06e5cf7a 12040 const char *ns_status;
2c0262af 12041
17731115
PM
12042 if (is_a64(env)) {
12043 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12044 return;
12045 }
12046
2c0262af 12047 for(i=0;i<16;i++) {
7fe48483 12048 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12049 if ((i % 4) == 3)
7fe48483 12050 cpu_fprintf(f, "\n");
2c0262af 12051 else
7fe48483 12052 cpu_fprintf(f, " ");
2c0262af 12053 }
b5ff1b31 12054 psr = cpsr_read(env);
06e5cf7a
PM
12055
12056 if (arm_feature(env, ARM_FEATURE_EL3) &&
12057 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12058 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12059 } else {
12060 ns_status = "";
12061 }
12062
12063 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 12064 psr,
b5ff1b31
FB
12065 psr & (1 << 31) ? 'N' : '-',
12066 psr & (1 << 30) ? 'Z' : '-',
12067 psr & (1 << 29) ? 'C' : '-',
12068 psr & (1 << 28) ? 'V' : '-',
5fafdf24 12069 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 12070 ns_status,
b5ff1b31 12071 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 12072
f2617cfc
PM
12073 if (flags & CPU_DUMP_FPU) {
12074 int numvfpregs = 0;
12075 if (arm_feature(env, ARM_FEATURE_VFP)) {
12076 numvfpregs += 16;
12077 }
12078 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12079 numvfpregs += 16;
12080 }
12081 for (i = 0; i < numvfpregs; i++) {
12082 uint64_t v = float64_val(env->vfp.regs[i]);
12083 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12084 i * 2, (uint32_t)v,
12085 i * 2 + 1, (uint32_t)(v >> 32),
12086 i, v);
12087 }
12088 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12089 }
2c0262af 12090}
a6b025d3 12091
bad729e2
RH
12092void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12093 target_ulong *data)
d2856f1a 12094{
3926cc84 12095 if (is_a64(env)) {
bad729e2 12096 env->pc = data[0];
40f860cd 12097 env->condexec_bits = 0;
aaa1f954 12098 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12099 } else {
bad729e2
RH
12100 env->regs[15] = data[0];
12101 env->condexec_bits = data[1];
aaa1f954 12102 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12103 }
d2856f1a 12104}