]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
qemu-log: dfilter-ise exec, out_asm, op and opt_op
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
57fec1fe 26#include "tcg-op.h"
1de7afc9 27#include "qemu/log.h"
534df156 28#include "qemu/bitops.h"
1d854765 29#include "arm_ldst.h"
1497c961 30
2ef6175a
RH
31#include "exec/helper-proto.h"
32#include "exec/helper-gen.h"
2c0262af 33
a7e30d84 34#include "trace-tcg.h"
508127e2 35#include "exec/log.h"
a7e30d84
LV
36
37
2b51668f
PM
38#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
39#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 40/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 41#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 42#define ENABLE_ARCH_5J 0
2b51668f
PM
43#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
44#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
45#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
46#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
47#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 48
86753403 49#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 50
f570c61e 51#include "translate.h"
e12ce78d 52
b5ff1b31
FB
53#if defined(CONFIG_USER_ONLY)
54#define IS_USER(s) 1
55#else
56#define IS_USER(s) (s->user)
57#endif
58
1bcea73e 59TCGv_env cpu_env;
ad69471c 60/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 61static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 62static TCGv_i32 cpu_R[16];
78bcaa3e
RH
63TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
64TCGv_i64 cpu_exclusive_addr;
65TCGv_i64 cpu_exclusive_val;
426f5abc 66#ifdef CONFIG_USER_ONLY
78bcaa3e
RH
67TCGv_i64 cpu_exclusive_test;
68TCGv_i32 cpu_exclusive_info;
426f5abc 69#endif
ad69471c 70
b26eefb6 71/* FIXME: These should be removed. */
39d5492a 72static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 73static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 74
022c62cb 75#include "exec/gen-icount.h"
2e70f6ef 76
155c3eac
FN
77static const char *regnames[] =
78 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
79 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
80
b26eefb6
PB
81/* initialize TCG globals. */
82void arm_translate_init(void)
83{
155c3eac
FN
84 int i;
85
a7812ae4
PB
86 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
87
155c3eac 88 for (i = 0; i < 16; i++) {
e1ccc054 89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 90 offsetof(CPUARMState, regs[i]),
155c3eac
FN
91 regnames[i]);
92 }
e1ccc054
RH
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 97
e1ccc054 98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 102#ifdef CONFIG_USER_ONLY
e1ccc054 103 cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 104 offsetof(CPUARMState, exclusive_test), "exclusive_test");
e1ccc054 105 cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 106 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 107#endif
155c3eac 108
14ade10f 109 a64_translate_init();
b26eefb6
PB
110}
111
579d21cc
PM
112static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
113{
114 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
115 * insns:
116 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
117 * otherwise, access as if at PL0.
118 */
119 switch (s->mmu_idx) {
120 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
121 case ARMMMUIdx_S12NSE0:
122 case ARMMMUIdx_S12NSE1:
123 return ARMMMUIdx_S12NSE0;
124 case ARMMMUIdx_S1E3:
125 case ARMMMUIdx_S1SE0:
126 case ARMMMUIdx_S1SE1:
127 return ARMMMUIdx_S1SE0;
128 case ARMMMUIdx_S2NS:
129 default:
130 g_assert_not_reached();
131 }
132}
133
39d5492a 134static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 135{
39d5492a 136 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
0ecb72a5 141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 142
39d5492a 143static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 146 tcg_temp_free_i32(var);
d9ba4830
PB
147}
148
149#define store_cpu_field(var, name) \
0ecb72a5 150 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 151
b26eefb6 152/* Set a variable to the value of a CPU register. */
39d5492a 153static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
154{
155 if (reg == 15) {
156 uint32_t addr;
b90372ad 157 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 169static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 170{
39d5492a 171 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
39d5492a 178static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
155c3eac 184 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 185 tcg_temp_free_i32(var);
b26eefb6
PB
186}
187
b26eefb6 188/* Value extensions. */
86831435
PB
189#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
191#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
1497c961
PB
194#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
39d5492a 198static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 199{
39d5492a 200 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 201 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
d4a2dc67 207static void gen_exception_internal(int excp)
d9ba4830 208{
d4a2dc67
PM
209 TCGv_i32 tcg_excp = tcg_const_i32(excp);
210
211 assert(excp_is_internal(excp));
212 gen_helper_exception_internal(cpu_env, tcg_excp);
213 tcg_temp_free_i32(tcg_excp);
214}
215
73710361 216static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
217{
218 TCGv_i32 tcg_excp = tcg_const_i32(excp);
219 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 220 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 221
73710361
GB
222 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
223 tcg_syn, tcg_el);
224
225 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
226 tcg_temp_free_i32(tcg_syn);
227 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
228}
229
50225ad0
PM
230static void gen_ss_advance(DisasContext *s)
231{
232 /* If the singlestep state is Active-not-pending, advance to
233 * Active-pending.
234 */
235 if (s->ss_active) {
236 s->pstate_ss = 0;
237 gen_helper_clear_pstate_ss(cpu_env);
238 }
239}
240
241static void gen_step_complete_exception(DisasContext *s)
242{
243 /* We just completed step of an insn. Move from Active-not-pending
244 * to Active-pending, and then also take the swstep exception.
245 * This corresponds to making the (IMPDEF) choice to prioritize
246 * swstep exceptions over asynchronous exceptions taken to an exception
247 * level where debug is disabled. This choice has the advantage that
248 * we do not need to maintain internal state corresponding to the
249 * ISV/EX syndrome bits between completion of the step and generation
250 * of the exception, and our syndrome information is always correct.
251 */
252 gen_ss_advance(s);
73710361
GB
253 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
254 default_exception_el(s));
50225ad0
PM
255 s->is_jmp = DISAS_EXC;
256}
257
39d5492a 258static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 259{
39d5492a
PM
260 TCGv_i32 tmp1 = tcg_temp_new_i32();
261 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
262 tcg_gen_ext16s_i32(tmp1, a);
263 tcg_gen_ext16s_i32(tmp2, b);
3670669c 264 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 265 tcg_temp_free_i32(tmp2);
3670669c
PB
266 tcg_gen_sari_i32(a, a, 16);
267 tcg_gen_sari_i32(b, b, 16);
268 tcg_gen_mul_i32(b, b, a);
269 tcg_gen_mov_i32(a, tmp1);
7d1b0095 270 tcg_temp_free_i32(tmp1);
3670669c
PB
271}
272
273/* Byteswap each halfword. */
39d5492a 274static void gen_rev16(TCGv_i32 var)
3670669c 275{
39d5492a 276 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_andi_i32(var, var, 0xff00ff00);
281 tcg_gen_or_i32(var, var, tmp);
7d1b0095 282 tcg_temp_free_i32(tmp);
3670669c
PB
283}
284
285/* Byteswap low halfword and sign extend. */
39d5492a 286static void gen_revsh(TCGv_i32 var)
3670669c 287{
1a855029
AJ
288 tcg_gen_ext16u_i32(var, var);
289 tcg_gen_bswap16_i32(var, var);
290 tcg_gen_ext16s_i32(var, var);
3670669c
PB
291}
292
293/* Unsigned bitfield extract. */
39d5492a 294static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
295{
296 if (shift)
297 tcg_gen_shri_i32(var, var, shift);
298 tcg_gen_andi_i32(var, var, mask);
299}
300
301/* Signed bitfield extract. */
39d5492a 302static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
303{
304 uint32_t signbit;
305
306 if (shift)
307 tcg_gen_sari_i32(var, var, shift);
308 if (shift + width < 32) {
309 signbit = 1u << (width - 1);
310 tcg_gen_andi_i32(var, var, (1u << width) - 1);
311 tcg_gen_xori_i32(var, var, signbit);
312 tcg_gen_subi_i32(var, var, signbit);
313 }
314}
315
838fa72d 316/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 317static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 318{
838fa72d
AJ
319 TCGv_i64 tmp64 = tcg_temp_new_i64();
320
321 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 322 tcg_temp_free_i32(b);
838fa72d
AJ
323 tcg_gen_shli_i64(tmp64, tmp64, 32);
324 tcg_gen_add_i64(a, tmp64, a);
325
326 tcg_temp_free_i64(tmp64);
327 return a;
328}
329
330/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 331static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
332{
333 TCGv_i64 tmp64 = tcg_temp_new_i64();
334
335 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 336 tcg_temp_free_i32(b);
838fa72d
AJ
337 tcg_gen_shli_i64(tmp64, tmp64, 32);
338 tcg_gen_sub_i64(a, tmp64, a);
339
340 tcg_temp_free_i64(tmp64);
341 return a;
3670669c
PB
342}
343
5e3f878a 344/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 345static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 346{
39d5492a
PM
347 TCGv_i32 lo = tcg_temp_new_i32();
348 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 349 TCGv_i64 ret;
5e3f878a 350
831d7fe8 351 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 352 tcg_temp_free_i32(a);
7d1b0095 353 tcg_temp_free_i32(b);
831d7fe8
RH
354
355 ret = tcg_temp_new_i64();
356 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
357 tcg_temp_free_i32(lo);
358 tcg_temp_free_i32(hi);
831d7fe8
RH
359
360 return ret;
5e3f878a
PB
361}
362
39d5492a 363static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 364{
39d5492a
PM
365 TCGv_i32 lo = tcg_temp_new_i32();
366 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 367 TCGv_i64 ret;
5e3f878a 368
831d7fe8 369 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 370 tcg_temp_free_i32(a);
7d1b0095 371 tcg_temp_free_i32(b);
831d7fe8
RH
372
373 ret = tcg_temp_new_i64();
374 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
375 tcg_temp_free_i32(lo);
376 tcg_temp_free_i32(hi);
831d7fe8
RH
377
378 return ret;
5e3f878a
PB
379}
380
8f01245e 381/* Swap low and high halfwords. */
39d5492a 382static void gen_swap_half(TCGv_i32 var)
8f01245e 383{
39d5492a 384 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
385 tcg_gen_shri_i32(tmp, var, 16);
386 tcg_gen_shli_i32(var, var, 16);
387 tcg_gen_or_i32(var, var, tmp);
7d1b0095 388 tcg_temp_free_i32(tmp);
8f01245e
PB
389}
390
b26eefb6
PB
391/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
392 tmp = (t0 ^ t1) & 0x8000;
393 t0 &= ~0x8000;
394 t1 &= ~0x8000;
395 t0 = (t0 + t1) ^ tmp;
396 */
397
39d5492a 398static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 399{
39d5492a 400 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andi_i32(tmp, tmp, 0x8000);
403 tcg_gen_andi_i32(t0, t0, ~0x8000);
404 tcg_gen_andi_i32(t1, t1, ~0x8000);
405 tcg_gen_add_i32(t0, t0, t1);
406 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
407 tcg_temp_free_i32(tmp);
408 tcg_temp_free_i32(t1);
b26eefb6
PB
409}
410
411/* Set CF to the top bit of var. */
39d5492a 412static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 413{
66c374de 414 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
415}
416
417/* Set N and Z flags from var. */
39d5492a 418static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 419{
66c374de
AJ
420 tcg_gen_mov_i32(cpu_NF, var);
421 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
422}
423
424/* T0 += T1 + CF. */
39d5492a 425static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 426{
396e467c 427 tcg_gen_add_i32(t0, t0, t1);
66c374de 428 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
429}
430
e9bb4aa9 431/* dest = T0 + T1 + CF. */
39d5492a 432static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 433{
e9bb4aa9 434 tcg_gen_add_i32(dest, t0, t1);
66c374de 435 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
436}
437
3670669c 438/* dest = T0 - T1 + CF - 1. */
39d5492a 439static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 440{
3670669c 441 tcg_gen_sub_i32(dest, t0, t1);
66c374de 442 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 443 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
444}
445
72485ec4 446/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 447static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 448{
39d5492a 449 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
450 tcg_gen_movi_i32(tmp, 0);
451 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 452 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 453 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
454 tcg_gen_xor_i32(tmp, t0, t1);
455 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
456 tcg_temp_free_i32(tmp);
457 tcg_gen_mov_i32(dest, cpu_NF);
458}
459
49b4c31e 460/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 461static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 462{
39d5492a 463 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
464 if (TCG_TARGET_HAS_add2_i32) {
465 tcg_gen_movi_i32(tmp, 0);
466 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 467 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
468 } else {
469 TCGv_i64 q0 = tcg_temp_new_i64();
470 TCGv_i64 q1 = tcg_temp_new_i64();
471 tcg_gen_extu_i32_i64(q0, t0);
472 tcg_gen_extu_i32_i64(q1, t1);
473 tcg_gen_add_i64(q0, q0, q1);
474 tcg_gen_extu_i32_i64(q1, cpu_CF);
475 tcg_gen_add_i64(q0, q0, q1);
476 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
477 tcg_temp_free_i64(q0);
478 tcg_temp_free_i64(q1);
479 }
480 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
481 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
482 tcg_gen_xor_i32(tmp, t0, t1);
483 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
484 tcg_temp_free_i32(tmp);
485 tcg_gen_mov_i32(dest, cpu_NF);
486}
487
72485ec4 488/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 489static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 490{
39d5492a 491 TCGv_i32 tmp;
72485ec4
AJ
492 tcg_gen_sub_i32(cpu_NF, t0, t1);
493 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
494 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
495 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
496 tmp = tcg_temp_new_i32();
497 tcg_gen_xor_i32(tmp, t0, t1);
498 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
499 tcg_temp_free_i32(tmp);
500 tcg_gen_mov_i32(dest, cpu_NF);
501}
502
e77f0832 503/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 504static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 505{
39d5492a 506 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
507 tcg_gen_not_i32(tmp, t1);
508 gen_adc_CC(dest, t0, tmp);
39d5492a 509 tcg_temp_free_i32(tmp);
2de68a49
RH
510}
511
365af80e 512#define GEN_SHIFT(name) \
39d5492a 513static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 514{ \
39d5492a 515 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
516 tmp1 = tcg_temp_new_i32(); \
517 tcg_gen_andi_i32(tmp1, t1, 0xff); \
518 tmp2 = tcg_const_i32(0); \
519 tmp3 = tcg_const_i32(0x1f); \
520 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
521 tcg_temp_free_i32(tmp3); \
522 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
523 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
524 tcg_temp_free_i32(tmp2); \
525 tcg_temp_free_i32(tmp1); \
526}
527GEN_SHIFT(shl)
528GEN_SHIFT(shr)
529#undef GEN_SHIFT
530
39d5492a 531static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 532{
39d5492a 533 TCGv_i32 tmp1, tmp2;
365af80e
AJ
534 tmp1 = tcg_temp_new_i32();
535 tcg_gen_andi_i32(tmp1, t1, 0xff);
536 tmp2 = tcg_const_i32(0x1f);
537 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
538 tcg_temp_free_i32(tmp2);
539 tcg_gen_sar_i32(dest, t0, tmp1);
540 tcg_temp_free_i32(tmp1);
541}
542
39d5492a 543static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 544{
39d5492a
PM
545 TCGv_i32 c0 = tcg_const_i32(0);
546 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
547 tcg_gen_neg_i32(tmp, src);
548 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
549 tcg_temp_free_i32(c0);
550 tcg_temp_free_i32(tmp);
551}
ad69471c 552
39d5492a 553static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 554{
9a119ff6 555 if (shift == 0) {
66c374de 556 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 557 } else {
66c374de
AJ
558 tcg_gen_shri_i32(cpu_CF, var, shift);
559 if (shift != 31) {
560 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
561 }
9a119ff6 562 }
9a119ff6 563}
b26eefb6 564
9a119ff6 565/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
566static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
567 int shift, int flags)
9a119ff6
PB
568{
569 switch (shiftop) {
570 case 0: /* LSL */
571 if (shift != 0) {
572 if (flags)
573 shifter_out_im(var, 32 - shift);
574 tcg_gen_shli_i32(var, var, shift);
575 }
576 break;
577 case 1: /* LSR */
578 if (shift == 0) {
579 if (flags) {
66c374de 580 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
581 }
582 tcg_gen_movi_i32(var, 0);
583 } else {
584 if (flags)
585 shifter_out_im(var, shift - 1);
586 tcg_gen_shri_i32(var, var, shift);
587 }
588 break;
589 case 2: /* ASR */
590 if (shift == 0)
591 shift = 32;
592 if (flags)
593 shifter_out_im(var, shift - 1);
594 if (shift == 32)
595 shift = 31;
596 tcg_gen_sari_i32(var, var, shift);
597 break;
598 case 3: /* ROR/RRX */
599 if (shift != 0) {
600 if (flags)
601 shifter_out_im(var, shift - 1);
f669df27 602 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 603 } else {
39d5492a 604 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 605 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
606 if (flags)
607 shifter_out_im(var, 0);
608 tcg_gen_shri_i32(var, var, 1);
b26eefb6 609 tcg_gen_or_i32(var, var, tmp);
7d1b0095 610 tcg_temp_free_i32(tmp);
b26eefb6
PB
611 }
612 }
613};
614
39d5492a
PM
615static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
616 TCGv_i32 shift, int flags)
8984bd2e
PB
617{
618 if (flags) {
619 switch (shiftop) {
9ef39277
BS
620 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
621 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
622 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
623 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
624 }
625 } else {
626 switch (shiftop) {
365af80e
AJ
627 case 0:
628 gen_shl(var, var, shift);
629 break;
630 case 1:
631 gen_shr(var, var, shift);
632 break;
633 case 2:
634 gen_sar(var, var, shift);
635 break;
f669df27
AJ
636 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
637 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
638 }
639 }
7d1b0095 640 tcg_temp_free_i32(shift);
8984bd2e
PB
641}
642
6ddbc6e4
PB
643#define PAS_OP(pfx) \
644 switch (op2) { \
645 case 0: gen_pas_helper(glue(pfx,add16)); break; \
646 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
647 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
648 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
649 case 4: gen_pas_helper(glue(pfx,add8)); break; \
650 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
651 }
39d5492a 652static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 653{
a7812ae4 654 TCGv_ptr tmp;
6ddbc6e4
PB
655
656 switch (op1) {
657#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
658 case 1:
a7812ae4 659 tmp = tcg_temp_new_ptr();
0ecb72a5 660 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 661 PAS_OP(s)
b75263d6 662 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
663 break;
664 case 5:
a7812ae4 665 tmp = tcg_temp_new_ptr();
0ecb72a5 666 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 667 PAS_OP(u)
b75263d6 668 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
669 break;
670#undef gen_pas_helper
671#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
672 case 2:
673 PAS_OP(q);
674 break;
675 case 3:
676 PAS_OP(sh);
677 break;
678 case 6:
679 PAS_OP(uq);
680 break;
681 case 7:
682 PAS_OP(uh);
683 break;
684#undef gen_pas_helper
685 }
686}
9ee6e8bb
PB
687#undef PAS_OP
688
6ddbc6e4
PB
689/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
690#define PAS_OP(pfx) \
ed89a2f1 691 switch (op1) { \
6ddbc6e4
PB
692 case 0: gen_pas_helper(glue(pfx,add8)); break; \
693 case 1: gen_pas_helper(glue(pfx,add16)); break; \
694 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
695 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
696 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
697 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
698 }
39d5492a 699static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 700{
a7812ae4 701 TCGv_ptr tmp;
6ddbc6e4 702
ed89a2f1 703 switch (op2) {
6ddbc6e4
PB
704#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
705 case 0:
a7812ae4 706 tmp = tcg_temp_new_ptr();
0ecb72a5 707 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 708 PAS_OP(s)
b75263d6 709 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
710 break;
711 case 4:
a7812ae4 712 tmp = tcg_temp_new_ptr();
0ecb72a5 713 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 714 PAS_OP(u)
b75263d6 715 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
716 break;
717#undef gen_pas_helper
718#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
719 case 1:
720 PAS_OP(q);
721 break;
722 case 2:
723 PAS_OP(sh);
724 break;
725 case 5:
726 PAS_OP(uq);
727 break;
728 case 6:
729 PAS_OP(uh);
730 break;
731#undef gen_pas_helper
732 }
733}
9ee6e8bb
PB
734#undef PAS_OP
735
39fb730a 736/*
6c2c63d3 737 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
738 * This is common between ARM and Aarch64 targets.
739 */
6c2c63d3 740void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 741{
6c2c63d3
RH
742 TCGv_i32 value;
743 TCGCond cond;
744 bool global = true;
d9ba4830 745
d9ba4830
PB
746 switch (cc) {
747 case 0: /* eq: Z */
d9ba4830 748 case 1: /* ne: !Z */
6c2c63d3
RH
749 cond = TCG_COND_EQ;
750 value = cpu_ZF;
d9ba4830 751 break;
6c2c63d3 752
d9ba4830 753 case 2: /* cs: C */
d9ba4830 754 case 3: /* cc: !C */
6c2c63d3
RH
755 cond = TCG_COND_NE;
756 value = cpu_CF;
d9ba4830 757 break;
6c2c63d3 758
d9ba4830 759 case 4: /* mi: N */
d9ba4830 760 case 5: /* pl: !N */
6c2c63d3
RH
761 cond = TCG_COND_LT;
762 value = cpu_NF;
d9ba4830 763 break;
6c2c63d3 764
d9ba4830 765 case 6: /* vs: V */
d9ba4830 766 case 7: /* vc: !V */
6c2c63d3
RH
767 cond = TCG_COND_LT;
768 value = cpu_VF;
d9ba4830 769 break;
6c2c63d3 770
d9ba4830 771 case 8: /* hi: C && !Z */
6c2c63d3
RH
772 case 9: /* ls: !C || Z -> !(C && !Z) */
773 cond = TCG_COND_NE;
774 value = tcg_temp_new_i32();
775 global = false;
776 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
777 ZF is non-zero for !Z; so AND the two subexpressions. */
778 tcg_gen_neg_i32(value, cpu_CF);
779 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 780 break;
6c2c63d3 781
d9ba4830 782 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 783 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
784 /* Since we're only interested in the sign bit, == 0 is >= 0. */
785 cond = TCG_COND_GE;
786 value = tcg_temp_new_i32();
787 global = false;
788 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 789 break;
6c2c63d3 790
d9ba4830 791 case 12: /* gt: !Z && N == V */
d9ba4830 792 case 13: /* le: Z || N != V */
6c2c63d3
RH
793 cond = TCG_COND_NE;
794 value = tcg_temp_new_i32();
795 global = false;
796 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
797 * the sign bit then AND with ZF to yield the result. */
798 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
799 tcg_gen_sari_i32(value, value, 31);
800 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 801 break;
6c2c63d3 802
9305eac0
RH
803 case 14: /* always */
804 case 15: /* always */
805 /* Use the ALWAYS condition, which will fold early.
806 * It doesn't matter what we use for the value. */
807 cond = TCG_COND_ALWAYS;
808 value = cpu_ZF;
809 goto no_invert;
810
d9ba4830
PB
811 default:
812 fprintf(stderr, "Bad condition code 0x%x\n", cc);
813 abort();
814 }
6c2c63d3
RH
815
816 if (cc & 1) {
817 cond = tcg_invert_cond(cond);
818 }
819
9305eac0 820 no_invert:
6c2c63d3
RH
821 cmp->cond = cond;
822 cmp->value = value;
823 cmp->value_global = global;
824}
825
826void arm_free_cc(DisasCompare *cmp)
827{
828 if (!cmp->value_global) {
829 tcg_temp_free_i32(cmp->value);
830 }
831}
832
833void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
834{
835 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
836}
837
838void arm_gen_test_cc(int cc, TCGLabel *label)
839{
840 DisasCompare cmp;
841 arm_test_cc(&cmp, cc);
842 arm_jump_cc(&cmp, label);
843 arm_free_cc(&cmp);
d9ba4830 844}
2c0262af 845
b1d8e52e 846static const uint8_t table_logic_cc[16] = {
2c0262af
FB
847 1, /* and */
848 1, /* xor */
849 0, /* sub */
850 0, /* rsb */
851 0, /* add */
852 0, /* adc */
853 0, /* sbc */
854 0, /* rsc */
855 1, /* andl */
856 1, /* xorl */
857 0, /* cmp */
858 0, /* cmn */
859 1, /* orr */
860 1, /* mov */
861 1, /* bic */
862 1, /* mvn */
863};
3b46e624 864
d9ba4830
PB
865/* Set PC and Thumb state from an immediate address. */
866static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 867{
39d5492a 868 TCGv_i32 tmp;
99c475ab 869
577bf808 870 s->is_jmp = DISAS_JUMP;
d9ba4830 871 if (s->thumb != (addr & 1)) {
7d1b0095 872 tmp = tcg_temp_new_i32();
d9ba4830 873 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 874 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 875 tcg_temp_free_i32(tmp);
d9ba4830 876 }
155c3eac 877 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
878}
879
880/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 881static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 882{
577bf808 883 s->is_jmp = DISAS_JUMP;
155c3eac
FN
884 tcg_gen_andi_i32(cpu_R[15], var, ~1);
885 tcg_gen_andi_i32(var, var, 1);
886 store_cpu_field(var, thumb);
d9ba4830
PB
887}
888
21aeb343
JR
889/* Variant of store_reg which uses branch&exchange logic when storing
890 to r15 in ARM architecture v7 and above. The source must be a temporary
891 and will be marked as dead. */
7dcc1f89 892static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
893{
894 if (reg == 15 && ENABLE_ARCH_7) {
895 gen_bx(s, var);
896 } else {
897 store_reg(s, reg, var);
898 }
899}
900
be5e7a76
DES
901/* Variant of store_reg which uses branch&exchange logic when storing
902 * to r15 in ARM architecture v5T and above. This is used for storing
903 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
904 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 905static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
906{
907 if (reg == 15 && ENABLE_ARCH_5) {
908 gen_bx(s, var);
909 } else {
910 store_reg(s, reg, var);
911 }
912}
913
e334bd31
PB
914#ifdef CONFIG_USER_ONLY
915#define IS_USER_ONLY 1
916#else
917#define IS_USER_ONLY 0
918#endif
919
08307563
PM
920/* Abstractions of "generate code to do a guest load/store for
921 * AArch32", where a vaddr is always 32 bits (and is zero
922 * extended if we're a 64 bit core) and data is also
923 * 32 bits unless specifically doing a 64 bit access.
924 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 925 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
926 */
927#if TARGET_LONG_BITS == 32
928
e334bd31 929#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
12dcc321
PB
930static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
931 TCGv_i32 addr, int index) \
08307563 932{ \
dacf0a2f 933 TCGMemOp opc = (OPC) | s->be_data; \
e334bd31
PB
934 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
935 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
936 TCGv addr_be = tcg_temp_new(); \
937 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
938 tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
939 tcg_temp_free(addr_be); \
940 return; \
941 } \
dacf0a2f 942 tcg_gen_qemu_ld_i32(val, addr, index, opc); \
08307563
PM
943}
944
e334bd31 945#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
12dcc321
PB
946static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
947 TCGv_i32 addr, int index) \
08307563 948{ \
dacf0a2f 949 TCGMemOp opc = (OPC) | s->be_data; \
e334bd31
PB
950 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
951 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
952 TCGv addr_be = tcg_temp_new(); \
953 tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
954 tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
955 tcg_temp_free(addr_be); \
956 return; \
957 } \
dacf0a2f 958 tcg_gen_qemu_st_i32(val, addr, index, opc); \
08307563
PM
959}
960
/* Emit a 64-bit guest load (32-bit target-address variant). */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        /* BE32 orders the two words of a doubleword access the other
         * way round; swap the 32-bit halves with a rotate.
         */
        tcg_gen_rotri_i64(val, val, 32);
    }
}
971
/* Emit a 64-bit guest store (32-bit target-address variant). */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        /* Swap the 32-bit halves before the store; inverse of the
         * rotate applied in gen_aa32_ld64.
         */
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
        return;
    }
    tcg_gen_qemu_st_i64(val, addr, index, opc);
}
986
987#else
988
/* 64-bit-target variant of DO_GEN_LD: the 32-bit guest address must
 * first be zero-extended into a 64-bit TCGv before the access, and
 * the BE32 XOR is applied to the extended address.
 */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */     \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR);                      \
    }                                                                    \
    tcg_gen_qemu_ld_i32(val, addr64, index, opc);                        \
    tcg_temp_free(addr64);                                               \
}
1003
/* 64-bit-target variant of DO_GEN_ST; see DO_GEN_LD above. */
#define DO_GEN_ST(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    TCGv addr64 = tcg_temp_new();                                        \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */     \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR);                      \
    }                                                                    \
    tcg_gen_qemu_st_i32(val, addr64, index, opc);                        \
    tcg_temp_free(addr64);                                               \
}
1018
/* 64-bit guest load, 64-bit-target variant: extend the address, then
 * apply the BE32 half-swap to the loaded value as in the 32-bit case.
 */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
    tcg_temp_free(addr64);
}
1033
/* 64-bit guest store, 64-bit-target variant: extend the address and,
 * for BE32 system emulation, swap the 32-bit halves before storing.
 */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
        tcg_temp_free(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr64, index, opc);
    }
    tcg_temp_free(addr64);
}
1052
1053#endif
1054
/* Instantiate the AArch32 load/store helpers.  The last argument is
 * the BE32 address XOR: 3 for byte, 2 for halfword, 0 for word.
 */
DO_GEN_LD(8s, MO_SB, 3)
DO_GEN_LD(8u, MO_UB, 3)
DO_GEN_LD(16s, MO_SW, 2)
DO_GEN_LD(16u, MO_UW, 2)
DO_GEN_LD(32u, MO_UL, 0)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
DO_GEN_ST(8, MO_UB, 3)
DO_GEN_ST(16, MO_UW, 2)
DO_GEN_ST(32, MO_UL, 0)
08307563 1066
/* Emit code setting the emulated PC (R15) to the immediate 'val'. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
1071
/* Generate an AArch32 HVC (hypervisor call) with 16-bit immediate
 * imm16.  The call may be reported either before or after the insn
 * notionally executes, hence the two gen_set_pc_im() calls.
 */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}
1089
/* Generate an AArch32 SMC (secure monitor call).  The pre-SMC helper
 * is given the AArch32 SMC syndrome so it can raise a trap before
 * execution; otherwise the exception is taken afterwards.
 */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1104
/* Sync the Thumb IT-block state from the DisasContext into the CPU
 * state field condexec_bits.  When the mask is zero there is no
 * active IT block and nothing needs to be stored.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack cond/mask into the architectural ITSTATE layout. */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
1115
/* Raise a QEMU-internal exception 'excp' for the current insn,
 * with the PC rewound by 'offset' bytes to point at the insn.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}
1123
/* Raise an architectural exception 'excp' with syndrome 'syn', taken
 * to exception level 'target_el', with the PC rewound by 'offset'
 * bytes to point at the faulting insn.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}
1132
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Clear bit 0 of the next PC before storing it to R15. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1139
b0109805 1140static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1141 TCGv_i32 var)
2c0262af 1142{
1e8d4eec 1143 int val, rm, shift, shiftop;
39d5492a 1144 TCGv_i32 offset;
2c0262af
FB
1145
1146 if (!(insn & (1 << 25))) {
1147 /* immediate */
1148 val = insn & 0xfff;
1149 if (!(insn & (1 << 23)))
1150 val = -val;
537730b9 1151 if (val != 0)
b0109805 1152 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1153 } else {
1154 /* shift/register */
1155 rm = (insn) & 0xf;
1156 shift = (insn >> 7) & 0x1f;
1e8d4eec 1157 shiftop = (insn >> 5) & 3;
b26eefb6 1158 offset = load_reg(s, rm);
9a119ff6 1159 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1160 if (!(insn & (1 << 23)))
b0109805 1161 tcg_gen_sub_i32(var, var, offset);
2c0262af 1162 else
b0109805 1163 tcg_gen_add_i32(var, var, offset);
7d1b0095 1164 tcg_temp_free_i32(offset);
2c0262af
FB
1165 }
1166}
1167
191f9a93 1168static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1169 int extra, TCGv_i32 var)
2c0262af
FB
1170{
1171 int val, rm;
39d5492a 1172 TCGv_i32 offset;
3b46e624 1173
2c0262af
FB
1174 if (insn & (1 << 22)) {
1175 /* immediate */
1176 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1177 if (!(insn & (1 << 23)))
1178 val = -val;
18acad92 1179 val += extra;
537730b9 1180 if (val != 0)
b0109805 1181 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1182 } else {
1183 /* register */
191f9a93 1184 if (extra)
b0109805 1185 tcg_gen_addi_i32(var, var, extra);
2c0262af 1186 rm = (insn) & 0xf;
b26eefb6 1187 offset = load_reg(s, rm);
2c0262af 1188 if (!(insn & (1 << 23)))
b0109805 1189 tcg_gen_sub_i32(var, var, offset);
2c0262af 1190 else
b0109805 1191 tcg_gen_add_i32(var, var, offset);
7d1b0095 1192 tcg_temp_free_i32(offset);
2c0262af
FB
1193 }
1194}
1195
5aaebd13
PM
1196static TCGv_ptr get_fpstatus_ptr(int neon)
1197{
1198 TCGv_ptr statusptr = tcg_temp_new_ptr();
1199 int offset;
1200 if (neon) {
0ecb72a5 1201 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1202 } else {
0ecb72a5 1203 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1204 }
1205 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1206 return statusptr;
1207}
1208
/* Two-operand VFP arithmetic: F0 = F0 <op> F1, single or double
 * precision by 'dp', using the active VFP fp_status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1227
/* Like gen_vfp_mul() but put result in F1 (F1 = F0 * F1). */
static inline void gen_vfp_F1_mul(int dp)
{
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1239
/* Like gen_vfp_neg() but put result in F1 (F1 = -F0). */
static inline void gen_vfp_F1_neg(int dp)
{
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1249
4373f3ce
PB
1250static inline void gen_vfp_abs(int dp)
1251{
1252 if (dp)
1253 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1254 else
1255 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1256}
1257
1258static inline void gen_vfp_neg(int dp)
1259{
1260 if (dp)
1261 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1262 else
1263 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1264}
1265
1266static inline void gen_vfp_sqrt(int dp)
1267{
1268 if (dp)
1269 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1270 else
1271 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1272}
1273
1274static inline void gen_vfp_cmp(int dp)
1275{
1276 if (dp)
1277 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1278 else
1279 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1280}
1281
1282static inline void gen_vfp_cmpe(int dp)
1283{
1284 if (dp)
1285 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1286 else
1287 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1288}
1289
1290static inline void gen_vfp_F1_ld0(int dp)
1291{
1292 if (dp)
5b340b51 1293 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1294 else
5b340b51 1295 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1296}
1297
/* Integer-to-float conversions: the integer source is always in
 * cpu_F0s; the result goes to cpu_F0d (dp) or cpu_F0s.  'neon'
 * selects the Neon standard fp_status instead of the VFP one.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1313
/* Float-to-integer conversions: source in cpu_F0d (dp) or cpu_F0s;
 * the integer result is always placed in cpu_F0s.  The ...z variants
 * are the round-towards-zero forms.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1331
/* Fixed-point <-> float conversions; 'shift' is the number of
 * fraction bits.  'round' is pasted onto the helper name to select
 * the rounding behaviour (_round_to_zero for the to-fixed
 * directions, empty otherwise).
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1356
/* Load a single (32-bit) or double (64-bit) value from guest memory
 * at 'addr' into the F0 scratch register.
 */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1365
/* Store the F0 scratch register (single or double precision) to
 * guest memory at 'addr'.
 */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1374
8e96005d
FB
1375static inline long
1376vfp_reg_offset (int dp, int reg)
1377{
1378 if (dp)
1379 return offsetof(CPUARMState, vfp.regs[reg]);
1380 else if (reg & 1) {
1381 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1382 + offsetof(CPU_DoubleU, l.upper);
1383 } else {
1384 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1385 + offsetof(CPU_DoubleU, l.lower);
1386 }
1387}
9ee6e8bb
PB
1388
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each D register holds two 32-bit pieces; reuse the
     * single-precision VFP offset computation.
     */
    return vfp_reg_offset(0, reg * 2 + n);
}
1398
39d5492a 1399static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1400{
39d5492a 1401 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1402 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1403 return tmp;
1404}
1405
39d5492a 1406static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1407{
1408 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1409 tcg_temp_free_i32(var);
8f8e3aa4
PB
1410}
1411
/* Load/store a whole 64-bit NEON (D) register to/from 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1421
/* VFP single/double values are represented as plain i32/i64. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1426
b7bcbe95
FB
1427static inline void gen_mov_F0_vreg(int dp, int reg)
1428{
1429 if (dp)
4373f3ce 1430 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1431 else
4373f3ce 1432 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1433}
1434
1435static inline void gen_mov_F1_vreg(int dp, int reg)
1436{
1437 if (dp)
4373f3ce 1438 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1439 else
4373f3ce 1440 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1441}
1442
1443static inline void gen_mov_vreg_F0(int dp, int reg)
1444{
1445 if (dp)
4373f3ce 1446 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1447 else
4373f3ce 1448 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1449}
1450
18c9b560
AZ
1451#define ARM_CP_RW_BIT (1 << 20)
1452
/* Load/store a 64-bit iwMMXt data register wR[reg] to/from 'var'. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1462
/* Load a 32-bit iwMMXt control register wC[reg] into a fresh temp;
 * the caller owns the returned temp.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store 'var' into control register wC[reg]; consumes the temp. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1475
/* Copy the M0 scratch register out to / in from iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* Bitwise M0 = M0 <op> wRn, using cpu_V1 as scratch. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1503
/* Two-operand iwMMXt op without env access: M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env-taking op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1583
/* Set bit 1 of wCon (the insn updated an iwMMXt data register). */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 of wCon (the insn updated an iwMMXt control register). */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Recompute the SIMD flags in wCASF from the value in M0. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1613
39d5492a
PM
1614static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1615 TCGv_i32 dest)
18c9b560
AZ
1616{
1617 int rd;
1618 uint32_t offset;
39d5492a 1619 TCGv_i32 tmp;
18c9b560
AZ
1620
1621 rd = (insn >> 16) & 0xf;
da6b5335 1622 tmp = load_reg(s, rd);
18c9b560
AZ
1623
1624 offset = (insn & 0xff) << ((insn >> 7) & 2);
1625 if (insn & (1 << 24)) {
1626 /* Pre indexed */
1627 if (insn & (1 << 23))
da6b5335 1628 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1629 else
da6b5335
FN
1630 tcg_gen_addi_i32(tmp, tmp, -offset);
1631 tcg_gen_mov_i32(dest, tmp);
18c9b560 1632 if (insn & (1 << 21))
da6b5335
FN
1633 store_reg(s, rd, tmp);
1634 else
7d1b0095 1635 tcg_temp_free_i32(tmp);
18c9b560
AZ
1636 } else if (insn & (1 << 21)) {
1637 /* Post indexed */
da6b5335 1638 tcg_gen_mov_i32(dest, tmp);
18c9b560 1639 if (insn & (1 << 23))
da6b5335 1640 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1641 else
da6b5335
FN
1642 tcg_gen_addi_i32(tmp, tmp, -offset);
1643 store_reg(s, rd, tmp);
18c9b560
AZ
1644 } else if (!(insn & (1 << 23)))
1645 return 1;
1646 return 0;
1647}
1648
39d5492a 1649static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1650{
1651 int rd = (insn >> 0) & 0xf;
39d5492a 1652 TCGv_i32 tmp;
18c9b560 1653
da6b5335
FN
1654 if (insn & (1 << 8)) {
1655 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1656 return 1;
da6b5335
FN
1657 } else {
1658 tmp = iwmmxt_load_creg(rd);
1659 }
1660 } else {
7d1b0095 1661 tmp = tcg_temp_new_i32();
da6b5335 1662 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1663 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1664 }
1665 tcg_gen_andi_i32(tmp, tmp, mask);
1666 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1667 tcg_temp_free_i32(tmp);
18c9b560
AZ
1668 return 0;
1669}
1670
a1c7273b 1671/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1672 (ie. an undefined instruction). */
7dcc1f89 1673static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1674{
1675 int rd, wrd;
1676 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1677 TCGv_i32 addr;
1678 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1679
1680 if ((insn & 0x0e000e00) == 0x0c000000) {
1681 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1682 wrd = insn & 0xf;
1683 rdlo = (insn >> 12) & 0xf;
1684 rdhi = (insn >> 16) & 0xf;
1685 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1686 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1687 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1688 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1689 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1690 } else { /* TMCRR */
da6b5335
FN
1691 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1692 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1693 gen_op_iwmmxt_set_mup();
1694 }
1695 return 0;
1696 }
1697
1698 wrd = (insn >> 12) & 0xf;
7d1b0095 1699 addr = tcg_temp_new_i32();
da6b5335 1700 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1701 tcg_temp_free_i32(addr);
18c9b560 1702 return 1;
da6b5335 1703 }
18c9b560
AZ
1704 if (insn & ARM_CP_RW_BIT) {
1705 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1706 tmp = tcg_temp_new_i32();
12dcc321 1707 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1708 iwmmxt_store_creg(wrd, tmp);
18c9b560 1709 } else {
e677137d
PB
1710 i = 1;
1711 if (insn & (1 << 8)) {
1712 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1713 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1714 i = 0;
1715 } else { /* WLDRW wRd */
29531141 1716 tmp = tcg_temp_new_i32();
12dcc321 1717 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1718 }
1719 } else {
29531141 1720 tmp = tcg_temp_new_i32();
e677137d 1721 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1722 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1723 } else { /* WLDRB */
12dcc321 1724 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1725 }
1726 }
1727 if (i) {
1728 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1729 tcg_temp_free_i32(tmp);
e677137d 1730 }
18c9b560
AZ
1731 gen_op_iwmmxt_movq_wRn_M0(wrd);
1732 }
1733 } else {
1734 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1735 tmp = iwmmxt_load_creg(wrd);
12dcc321 1736 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1737 } else {
1738 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1739 tmp = tcg_temp_new_i32();
e677137d
PB
1740 if (insn & (1 << 8)) {
1741 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1742 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1743 } else { /* WSTRW wRd */
ecc7b3aa 1744 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1745 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1746 }
1747 } else {
1748 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1749 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1750 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1751 } else { /* WSTRB */
ecc7b3aa 1752 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1753 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1754 }
1755 }
18c9b560 1756 }
29531141 1757 tcg_temp_free_i32(tmp);
18c9b560 1758 }
7d1b0095 1759 tcg_temp_free_i32(addr);
18c9b560
AZ
1760 return 0;
1761 }
1762
1763 if ((insn & 0x0f000000) != 0x0e000000)
1764 return 1;
1765
1766 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1767 case 0x000: /* WOR */
1768 wrd = (insn >> 12) & 0xf;
1769 rd0 = (insn >> 0) & 0xf;
1770 rd1 = (insn >> 16) & 0xf;
1771 gen_op_iwmmxt_movq_M0_wRn(rd0);
1772 gen_op_iwmmxt_orq_M0_wRn(rd1);
1773 gen_op_iwmmxt_setpsr_nz();
1774 gen_op_iwmmxt_movq_wRn_M0(wrd);
1775 gen_op_iwmmxt_set_mup();
1776 gen_op_iwmmxt_set_cup();
1777 break;
1778 case 0x011: /* TMCR */
1779 if (insn & 0xf)
1780 return 1;
1781 rd = (insn >> 12) & 0xf;
1782 wrd = (insn >> 16) & 0xf;
1783 switch (wrd) {
1784 case ARM_IWMMXT_wCID:
1785 case ARM_IWMMXT_wCASF:
1786 break;
1787 case ARM_IWMMXT_wCon:
1788 gen_op_iwmmxt_set_cup();
1789 /* Fall through. */
1790 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1791 tmp = iwmmxt_load_creg(wrd);
1792 tmp2 = load_reg(s, rd);
f669df27 1793 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1794 tcg_temp_free_i32(tmp2);
da6b5335 1795 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1796 break;
1797 case ARM_IWMMXT_wCGR0:
1798 case ARM_IWMMXT_wCGR1:
1799 case ARM_IWMMXT_wCGR2:
1800 case ARM_IWMMXT_wCGR3:
1801 gen_op_iwmmxt_set_cup();
da6b5335
FN
1802 tmp = load_reg(s, rd);
1803 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1804 break;
1805 default:
1806 return 1;
1807 }
1808 break;
1809 case 0x100: /* WXOR */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 0) & 0xf;
1812 rd1 = (insn >> 16) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
1814 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1815 gen_op_iwmmxt_setpsr_nz();
1816 gen_op_iwmmxt_movq_wRn_M0(wrd);
1817 gen_op_iwmmxt_set_mup();
1818 gen_op_iwmmxt_set_cup();
1819 break;
1820 case 0x111: /* TMRC */
1821 if (insn & 0xf)
1822 return 1;
1823 rd = (insn >> 12) & 0xf;
1824 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1825 tmp = iwmmxt_load_creg(wrd);
1826 store_reg(s, rd, tmp);
18c9b560
AZ
1827 break;
1828 case 0x300: /* WANDN */
1829 wrd = (insn >> 12) & 0xf;
1830 rd0 = (insn >> 0) & 0xf;
1831 rd1 = (insn >> 16) & 0xf;
1832 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1833 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1834 gen_op_iwmmxt_andq_M0_wRn(rd1);
1835 gen_op_iwmmxt_setpsr_nz();
1836 gen_op_iwmmxt_movq_wRn_M0(wrd);
1837 gen_op_iwmmxt_set_mup();
1838 gen_op_iwmmxt_set_cup();
1839 break;
1840 case 0x200: /* WAND */
1841 wrd = (insn >> 12) & 0xf;
1842 rd0 = (insn >> 0) & 0xf;
1843 rd1 = (insn >> 16) & 0xf;
1844 gen_op_iwmmxt_movq_M0_wRn(rd0);
1845 gen_op_iwmmxt_andq_M0_wRn(rd1);
1846 gen_op_iwmmxt_setpsr_nz();
1847 gen_op_iwmmxt_movq_wRn_M0(wrd);
1848 gen_op_iwmmxt_set_mup();
1849 gen_op_iwmmxt_set_cup();
1850 break;
1851 case 0x810: case 0xa10: /* WMADD */
1852 wrd = (insn >> 12) & 0xf;
1853 rd0 = (insn >> 0) & 0xf;
1854 rd1 = (insn >> 16) & 0xf;
1855 gen_op_iwmmxt_movq_M0_wRn(rd0);
1856 if (insn & (1 << 21))
1857 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1858 else
1859 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1860 gen_op_iwmmxt_movq_wRn_M0(wrd);
1861 gen_op_iwmmxt_set_mup();
1862 break;
1863 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1864 wrd = (insn >> 12) & 0xf;
1865 rd0 = (insn >> 16) & 0xf;
1866 rd1 = (insn >> 0) & 0xf;
1867 gen_op_iwmmxt_movq_M0_wRn(rd0);
1868 switch ((insn >> 22) & 3) {
1869 case 0:
1870 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1871 break;
1872 case 1:
1873 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1874 break;
1875 case 2:
1876 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1877 break;
1878 case 3:
1879 return 1;
1880 }
1881 gen_op_iwmmxt_movq_wRn_M0(wrd);
1882 gen_op_iwmmxt_set_mup();
1883 gen_op_iwmmxt_set_cup();
1884 break;
1885 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1886 wrd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
1888 rd1 = (insn >> 0) & 0xf;
1889 gen_op_iwmmxt_movq_M0_wRn(rd0);
1890 switch ((insn >> 22) & 3) {
1891 case 0:
1892 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1893 break;
1894 case 1:
1895 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1896 break;
1897 case 2:
1898 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1899 break;
1900 case 3:
1901 return 1;
1902 }
1903 gen_op_iwmmxt_movq_wRn_M0(wrd);
1904 gen_op_iwmmxt_set_mup();
1905 gen_op_iwmmxt_set_cup();
1906 break;
1907 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 if (insn & (1 << 22))
1913 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1914 else
1915 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1916 if (!(insn & (1 << 20)))
1917 gen_op_iwmmxt_addl_M0_wRn(wrd);
1918 gen_op_iwmmxt_movq_wRn_M0(wrd);
1919 gen_op_iwmmxt_set_mup();
1920 break;
1921 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 16) & 0xf;
1924 rd1 = (insn >> 0) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1926 if (insn & (1 << 21)) {
1927 if (insn & (1 << 20))
1928 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1929 else
1930 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1931 } else {
1932 if (insn & (1 << 20))
1933 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1934 else
1935 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1936 }
18c9b560
AZ
1937 gen_op_iwmmxt_movq_wRn_M0(wrd);
1938 gen_op_iwmmxt_set_mup();
1939 break;
1940 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1941 wrd = (insn >> 12) & 0xf;
1942 rd0 = (insn >> 16) & 0xf;
1943 rd1 = (insn >> 0) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1947 else
1948 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1949 if (!(insn & (1 << 20))) {
e677137d
PB
1950 iwmmxt_load_reg(cpu_V1, wrd);
1951 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1952 }
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 break;
1956 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 16) & 0xf;
1959 rd1 = (insn >> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 switch ((insn >> 22) & 3) {
1962 case 0:
1963 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1964 break;
1965 case 1:
1966 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1967 break;
1968 case 2:
1969 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1970 break;
1971 case 3:
1972 return 1;
1973 }
1974 gen_op_iwmmxt_movq_wRn_M0(wrd);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1977 break;
1978 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1979 wrd = (insn >> 12) & 0xf;
1980 rd0 = (insn >> 16) & 0xf;
1981 rd1 = (insn >> 0) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1983 if (insn & (1 << 22)) {
1984 if (insn & (1 << 20))
1985 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1986 else
1987 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1988 } else {
1989 if (insn & (1 << 20))
1990 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1991 else
1992 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1993 }
18c9b560
AZ
1994 gen_op_iwmmxt_movq_wRn_M0(wrd);
1995 gen_op_iwmmxt_set_mup();
1996 gen_op_iwmmxt_set_cup();
1997 break;
1998 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1999 wrd = (insn >> 12) & 0xf;
2000 rd0 = (insn >> 16) & 0xf;
2001 rd1 = (insn >> 0) & 0xf;
2002 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2003 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2004 tcg_gen_andi_i32(tmp, tmp, 7);
2005 iwmmxt_load_reg(cpu_V1, rd1);
2006 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2007 tcg_temp_free_i32(tmp);
18c9b560
AZ
2008 gen_op_iwmmxt_movq_wRn_M0(wrd);
2009 gen_op_iwmmxt_set_mup();
2010 break;
2011 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2012 if (((insn >> 6) & 3) == 3)
2013 return 1;
18c9b560
AZ
2014 rd = (insn >> 12) & 0xf;
2015 wrd = (insn >> 16) & 0xf;
da6b5335 2016 tmp = load_reg(s, rd);
18c9b560
AZ
2017 gen_op_iwmmxt_movq_M0_wRn(wrd);
2018 switch ((insn >> 6) & 3) {
2019 case 0:
da6b5335
FN
2020 tmp2 = tcg_const_i32(0xff);
2021 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2022 break;
2023 case 1:
da6b5335
FN
2024 tmp2 = tcg_const_i32(0xffff);
2025 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2026 break;
2027 case 2:
da6b5335
FN
2028 tmp2 = tcg_const_i32(0xffffffff);
2029 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2030 break;
da6b5335 2031 default:
39d5492a
PM
2032 TCGV_UNUSED_I32(tmp2);
2033 TCGV_UNUSED_I32(tmp3);
18c9b560 2034 }
da6b5335 2035 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2036 tcg_temp_free_i32(tmp3);
2037 tcg_temp_free_i32(tmp2);
7d1b0095 2038 tcg_temp_free_i32(tmp);
18c9b560
AZ
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 break;
2042 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2043 rd = (insn >> 12) & 0xf;
2044 wrd = (insn >> 16) & 0xf;
da6b5335 2045 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2046 return 1;
2047 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2048 tmp = tcg_temp_new_i32();
18c9b560
AZ
2049 switch ((insn >> 22) & 3) {
2050 case 0:
da6b5335 2051 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2052 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2053 if (insn & 8) {
2054 tcg_gen_ext8s_i32(tmp, tmp);
2055 } else {
2056 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2057 }
2058 break;
2059 case 1:
da6b5335 2060 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2061 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2062 if (insn & 8) {
2063 tcg_gen_ext16s_i32(tmp, tmp);
2064 } else {
2065 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2066 }
2067 break;
2068 case 2:
da6b5335 2069 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2070 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2071 break;
18c9b560 2072 }
da6b5335 2073 store_reg(s, rd, tmp);
18c9b560
AZ
2074 break;
2075 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2076 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2077 return 1;
da6b5335 2078 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2079 switch ((insn >> 22) & 3) {
2080 case 0:
da6b5335 2081 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2082 break;
2083 case 1:
da6b5335 2084 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2085 break;
2086 case 2:
da6b5335 2087 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2088 break;
18c9b560 2089 }
da6b5335
FN
2090 tcg_gen_shli_i32(tmp, tmp, 28);
2091 gen_set_nzcv(tmp);
7d1b0095 2092 tcg_temp_free_i32(tmp);
18c9b560
AZ
2093 break;
2094 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2095 if (((insn >> 6) & 3) == 3)
2096 return 1;
18c9b560
AZ
2097 rd = (insn >> 12) & 0xf;
2098 wrd = (insn >> 16) & 0xf;
da6b5335 2099 tmp = load_reg(s, rd);
18c9b560
AZ
2100 switch ((insn >> 6) & 3) {
2101 case 0:
da6b5335 2102 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2103 break;
2104 case 1:
da6b5335 2105 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 case 2:
da6b5335 2108 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2109 break;
18c9b560 2110 }
7d1b0095 2111 tcg_temp_free_i32(tmp);
18c9b560
AZ
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 break;
2115 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2116 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2117 return 1;
da6b5335 2118 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2119 tmp2 = tcg_temp_new_i32();
da6b5335 2120 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2121 switch ((insn >> 22) & 3) {
2122 case 0:
2123 for (i = 0; i < 7; i ++) {
da6b5335
FN
2124 tcg_gen_shli_i32(tmp2, tmp2, 4);
2125 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2126 }
2127 break;
2128 case 1:
2129 for (i = 0; i < 3; i ++) {
da6b5335
FN
2130 tcg_gen_shli_i32(tmp2, tmp2, 8);
2131 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2132 }
2133 break;
2134 case 2:
da6b5335
FN
2135 tcg_gen_shli_i32(tmp2, tmp2, 16);
2136 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2137 break;
18c9b560 2138 }
da6b5335 2139 gen_set_nzcv(tmp);
7d1b0095
PM
2140 tcg_temp_free_i32(tmp2);
2141 tcg_temp_free_i32(tmp);
18c9b560
AZ
2142 break;
2143 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2144 wrd = (insn >> 12) & 0xf;
2145 rd0 = (insn >> 16) & 0xf;
2146 gen_op_iwmmxt_movq_M0_wRn(rd0);
2147 switch ((insn >> 22) & 3) {
2148 case 0:
e677137d 2149 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2150 break;
2151 case 1:
e677137d 2152 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2153 break;
2154 case 2:
e677137d 2155 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2156 break;
2157 case 3:
2158 return 1;
2159 }
2160 gen_op_iwmmxt_movq_wRn_M0(wrd);
2161 gen_op_iwmmxt_set_mup();
2162 break;
2163 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2164 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2165 return 1;
da6b5335 2166 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2167 tmp2 = tcg_temp_new_i32();
da6b5335 2168 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2169 switch ((insn >> 22) & 3) {
2170 case 0:
2171 for (i = 0; i < 7; i ++) {
da6b5335
FN
2172 tcg_gen_shli_i32(tmp2, tmp2, 4);
2173 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2174 }
2175 break;
2176 case 1:
2177 for (i = 0; i < 3; i ++) {
da6b5335
FN
2178 tcg_gen_shli_i32(tmp2, tmp2, 8);
2179 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2180 }
2181 break;
2182 case 2:
da6b5335
FN
2183 tcg_gen_shli_i32(tmp2, tmp2, 16);
2184 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2185 break;
18c9b560 2186 }
da6b5335 2187 gen_set_nzcv(tmp);
7d1b0095
PM
2188 tcg_temp_free_i32(tmp2);
2189 tcg_temp_free_i32(tmp);
18c9b560
AZ
2190 break;
2191 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2192 rd = (insn >> 12) & 0xf;
2193 rd0 = (insn >> 16) & 0xf;
da6b5335 2194 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2195 return 1;
2196 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2197 tmp = tcg_temp_new_i32();
18c9b560
AZ
2198 switch ((insn >> 22) & 3) {
2199 case 0:
da6b5335 2200 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2201 break;
2202 case 1:
da6b5335 2203 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2204 break;
2205 case 2:
da6b5335 2206 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2207 break;
18c9b560 2208 }
da6b5335 2209 store_reg(s, rd, tmp);
18c9b560
AZ
2210 break;
2211 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2212 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 rd1 = (insn >> 0) & 0xf;
2216 gen_op_iwmmxt_movq_M0_wRn(rd0);
2217 switch ((insn >> 22) & 3) {
2218 case 0:
2219 if (insn & (1 << 21))
2220 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2223 break;
2224 case 1:
2225 if (insn & (1 << 21))
2226 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2227 else
2228 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2229 break;
2230 case 2:
2231 if (insn & (1 << 21))
2232 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2233 else
2234 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2235 break;
2236 case 3:
2237 return 1;
2238 }
2239 gen_op_iwmmxt_movq_wRn_M0(wrd);
2240 gen_op_iwmmxt_set_mup();
2241 gen_op_iwmmxt_set_cup();
2242 break;
2243 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2244 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2245 wrd = (insn >> 12) & 0xf;
2246 rd0 = (insn >> 16) & 0xf;
2247 gen_op_iwmmxt_movq_M0_wRn(rd0);
2248 switch ((insn >> 22) & 3) {
2249 case 0:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_unpacklsb_M0();
2252 else
2253 gen_op_iwmmxt_unpacklub_M0();
2254 break;
2255 case 1:
2256 if (insn & (1 << 21))
2257 gen_op_iwmmxt_unpacklsw_M0();
2258 else
2259 gen_op_iwmmxt_unpackluw_M0();
2260 break;
2261 case 2:
2262 if (insn & (1 << 21))
2263 gen_op_iwmmxt_unpacklsl_M0();
2264 else
2265 gen_op_iwmmxt_unpacklul_M0();
2266 break;
2267 case 3:
2268 return 1;
2269 }
2270 gen_op_iwmmxt_movq_wRn_M0(wrd);
2271 gen_op_iwmmxt_set_mup();
2272 gen_op_iwmmxt_set_cup();
2273 break;
2274 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2275 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 gen_op_iwmmxt_movq_M0_wRn(rd0);
2279 switch ((insn >> 22) & 3) {
2280 case 0:
2281 if (insn & (1 << 21))
2282 gen_op_iwmmxt_unpackhsb_M0();
2283 else
2284 gen_op_iwmmxt_unpackhub_M0();
2285 break;
2286 case 1:
2287 if (insn & (1 << 21))
2288 gen_op_iwmmxt_unpackhsw_M0();
2289 else
2290 gen_op_iwmmxt_unpackhuw_M0();
2291 break;
2292 case 2:
2293 if (insn & (1 << 21))
2294 gen_op_iwmmxt_unpackhsl_M0();
2295 else
2296 gen_op_iwmmxt_unpackhul_M0();
2297 break;
2298 case 3:
2299 return 1;
2300 }
2301 gen_op_iwmmxt_movq_wRn_M0(wrd);
2302 gen_op_iwmmxt_set_mup();
2303 gen_op_iwmmxt_set_cup();
2304 break;
2305 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2306 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2307 if (((insn >> 22) & 3) == 0)
2308 return 1;
18c9b560
AZ
2309 wrd = (insn >> 12) & 0xf;
2310 rd0 = (insn >> 16) & 0xf;
2311 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2312 tmp = tcg_temp_new_i32();
da6b5335 2313 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2314 tcg_temp_free_i32(tmp);
18c9b560 2315 return 1;
da6b5335 2316 }
18c9b560 2317 switch ((insn >> 22) & 3) {
18c9b560 2318 case 1:
477955bd 2319 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2320 break;
2321 case 2:
477955bd 2322 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2323 break;
2324 case 3:
477955bd 2325 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2326 break;
2327 }
7d1b0095 2328 tcg_temp_free_i32(tmp);
18c9b560
AZ
2329 gen_op_iwmmxt_movq_wRn_M0(wrd);
2330 gen_op_iwmmxt_set_mup();
2331 gen_op_iwmmxt_set_cup();
2332 break;
2333 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2334 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2335 if (((insn >> 22) & 3) == 0)
2336 return 1;
18c9b560
AZ
2337 wrd = (insn >> 12) & 0xf;
2338 rd0 = (insn >> 16) & 0xf;
2339 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2340 tmp = tcg_temp_new_i32();
da6b5335 2341 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2342 tcg_temp_free_i32(tmp);
18c9b560 2343 return 1;
da6b5335 2344 }
18c9b560 2345 switch ((insn >> 22) & 3) {
18c9b560 2346 case 1:
477955bd 2347 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2348 break;
2349 case 2:
477955bd 2350 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2351 break;
2352 case 3:
477955bd 2353 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2354 break;
2355 }
7d1b0095 2356 tcg_temp_free_i32(tmp);
18c9b560
AZ
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 gen_op_iwmmxt_set_cup();
2360 break;
2361 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2362 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2363 if (((insn >> 22) & 3) == 0)
2364 return 1;
18c9b560
AZ
2365 wrd = (insn >> 12) & 0xf;
2366 rd0 = (insn >> 16) & 0xf;
2367 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2368 tmp = tcg_temp_new_i32();
da6b5335 2369 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2370 tcg_temp_free_i32(tmp);
18c9b560 2371 return 1;
da6b5335 2372 }
18c9b560 2373 switch ((insn >> 22) & 3) {
18c9b560 2374 case 1:
477955bd 2375 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2376 break;
2377 case 2:
477955bd 2378 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2379 break;
2380 case 3:
477955bd 2381 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2382 break;
2383 }
7d1b0095 2384 tcg_temp_free_i32(tmp);
18c9b560
AZ
2385 gen_op_iwmmxt_movq_wRn_M0(wrd);
2386 gen_op_iwmmxt_set_mup();
2387 gen_op_iwmmxt_set_cup();
2388 break;
2389 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2390 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2391 if (((insn >> 22) & 3) == 0)
2392 return 1;
18c9b560
AZ
2393 wrd = (insn >> 12) & 0xf;
2394 rd0 = (insn >> 16) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2396 tmp = tcg_temp_new_i32();
18c9b560 2397 switch ((insn >> 22) & 3) {
18c9b560 2398 case 1:
da6b5335 2399 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2400 tcg_temp_free_i32(tmp);
18c9b560 2401 return 1;
da6b5335 2402 }
477955bd 2403 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2404 break;
2405 case 2:
da6b5335 2406 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2407 tcg_temp_free_i32(tmp);
18c9b560 2408 return 1;
da6b5335 2409 }
477955bd 2410 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2411 break;
2412 case 3:
da6b5335 2413 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2414 tcg_temp_free_i32(tmp);
18c9b560 2415 return 1;
da6b5335 2416 }
477955bd 2417 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2418 break;
2419 }
7d1b0095 2420 tcg_temp_free_i32(tmp);
18c9b560
AZ
2421 gen_op_iwmmxt_movq_wRn_M0(wrd);
2422 gen_op_iwmmxt_set_mup();
2423 gen_op_iwmmxt_set_cup();
2424 break;
2425 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2426 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2427 wrd = (insn >> 12) & 0xf;
2428 rd0 = (insn >> 16) & 0xf;
2429 rd1 = (insn >> 0) & 0xf;
2430 gen_op_iwmmxt_movq_M0_wRn(rd0);
2431 switch ((insn >> 22) & 3) {
2432 case 0:
2433 if (insn & (1 << 21))
2434 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2435 else
2436 gen_op_iwmmxt_minub_M0_wRn(rd1);
2437 break;
2438 case 1:
2439 if (insn & (1 << 21))
2440 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2441 else
2442 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2443 break;
2444 case 2:
2445 if (insn & (1 << 21))
2446 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2447 else
2448 gen_op_iwmmxt_minul_M0_wRn(rd1);
2449 break;
2450 case 3:
2451 return 1;
2452 }
2453 gen_op_iwmmxt_movq_wRn_M0(wrd);
2454 gen_op_iwmmxt_set_mup();
2455 break;
2456 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2457 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2458 wrd = (insn >> 12) & 0xf;
2459 rd0 = (insn >> 16) & 0xf;
2460 rd1 = (insn >> 0) & 0xf;
2461 gen_op_iwmmxt_movq_M0_wRn(rd0);
2462 switch ((insn >> 22) & 3) {
2463 case 0:
2464 if (insn & (1 << 21))
2465 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2466 else
2467 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2468 break;
2469 case 1:
2470 if (insn & (1 << 21))
2471 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2472 else
2473 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2474 break;
2475 case 2:
2476 if (insn & (1 << 21))
2477 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2478 else
2479 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2480 break;
2481 case 3:
2482 return 1;
2483 }
2484 gen_op_iwmmxt_movq_wRn_M0(wrd);
2485 gen_op_iwmmxt_set_mup();
2486 break;
2487 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2488 case 0x402: case 0x502: case 0x602: case 0x702:
2489 wrd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 rd1 = (insn >> 0) & 0xf;
2492 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2493 tmp = tcg_const_i32((insn >> 20) & 3);
2494 iwmmxt_load_reg(cpu_V1, rd1);
2495 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2496 tcg_temp_free_i32(tmp);
18c9b560
AZ
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 break;
2500 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2501 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2502 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2503 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2504 wrd = (insn >> 12) & 0xf;
2505 rd0 = (insn >> 16) & 0xf;
2506 rd1 = (insn >> 0) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
2508 switch ((insn >> 20) & 0xf) {
2509 case 0x0:
2510 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2511 break;
2512 case 0x1:
2513 gen_op_iwmmxt_subub_M0_wRn(rd1);
2514 break;
2515 case 0x3:
2516 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2517 break;
2518 case 0x4:
2519 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2520 break;
2521 case 0x5:
2522 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2523 break;
2524 case 0x7:
2525 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2526 break;
2527 case 0x8:
2528 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2529 break;
2530 case 0x9:
2531 gen_op_iwmmxt_subul_M0_wRn(rd1);
2532 break;
2533 case 0xb:
2534 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2535 break;
2536 default:
2537 return 1;
2538 }
2539 gen_op_iwmmxt_movq_wRn_M0(wrd);
2540 gen_op_iwmmxt_set_mup();
2541 gen_op_iwmmxt_set_cup();
2542 break;
2543 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2544 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2545 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2546 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2547 wrd = (insn >> 12) & 0xf;
2548 rd0 = (insn >> 16) & 0xf;
2549 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2550 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2551 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2552 tcg_temp_free_i32(tmp);
18c9b560
AZ
2553 gen_op_iwmmxt_movq_wRn_M0(wrd);
2554 gen_op_iwmmxt_set_mup();
2555 gen_op_iwmmxt_set_cup();
2556 break;
2557 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2558 case 0x418: case 0x518: case 0x618: case 0x718:
2559 case 0x818: case 0x918: case 0xa18: case 0xb18:
2560 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2561 wrd = (insn >> 12) & 0xf;
2562 rd0 = (insn >> 16) & 0xf;
2563 rd1 = (insn >> 0) & 0xf;
2564 gen_op_iwmmxt_movq_M0_wRn(rd0);
2565 switch ((insn >> 20) & 0xf) {
2566 case 0x0:
2567 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2568 break;
2569 case 0x1:
2570 gen_op_iwmmxt_addub_M0_wRn(rd1);
2571 break;
2572 case 0x3:
2573 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2574 break;
2575 case 0x4:
2576 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2577 break;
2578 case 0x5:
2579 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2580 break;
2581 case 0x7:
2582 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2583 break;
2584 case 0x8:
2585 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2586 break;
2587 case 0x9:
2588 gen_op_iwmmxt_addul_M0_wRn(rd1);
2589 break;
2590 case 0xb:
2591 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2592 break;
2593 default:
2594 return 1;
2595 }
2596 gen_op_iwmmxt_movq_wRn_M0(wrd);
2597 gen_op_iwmmxt_set_mup();
2598 gen_op_iwmmxt_set_cup();
2599 break;
2600 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2601 case 0x408: case 0x508: case 0x608: case 0x708:
2602 case 0x808: case 0x908: case 0xa08: case 0xb08:
2603 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2604 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2605 return 1;
18c9b560
AZ
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2610 switch ((insn >> 22) & 3) {
18c9b560
AZ
2611 case 1:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2616 break;
2617 case 2:
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2620 else
2621 gen_op_iwmmxt_packul_M0_wRn(rd1);
2622 break;
2623 case 3:
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2626 else
2627 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2628 break;
2629 }
2630 gen_op_iwmmxt_movq_wRn_M0(wrd);
2631 gen_op_iwmmxt_set_mup();
2632 gen_op_iwmmxt_set_cup();
2633 break;
2634 case 0x201: case 0x203: case 0x205: case 0x207:
2635 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2636 case 0x211: case 0x213: case 0x215: case 0x217:
2637 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2638 wrd = (insn >> 5) & 0xf;
2639 rd0 = (insn >> 12) & 0xf;
2640 rd1 = (insn >> 0) & 0xf;
2641 if (rd0 == 0xf || rd1 == 0xf)
2642 return 1;
2643 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2644 tmp = load_reg(s, rd0);
2645 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2646 switch ((insn >> 16) & 0xf) {
2647 case 0x0: /* TMIA */
da6b5335 2648 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2649 break;
2650 case 0x8: /* TMIAPH */
da6b5335 2651 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2652 break;
2653 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2654 if (insn & (1 << 16))
da6b5335 2655 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2656 if (insn & (1 << 17))
da6b5335
FN
2657 tcg_gen_shri_i32(tmp2, tmp2, 16);
2658 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2659 break;
2660 default:
7d1b0095
PM
2661 tcg_temp_free_i32(tmp2);
2662 tcg_temp_free_i32(tmp);
18c9b560
AZ
2663 return 1;
2664 }
7d1b0095
PM
2665 tcg_temp_free_i32(tmp2);
2666 tcg_temp_free_i32(tmp);
18c9b560
AZ
2667 gen_op_iwmmxt_movq_wRn_M0(wrd);
2668 gen_op_iwmmxt_set_mup();
2669 break;
2670 default:
2671 return 1;
2672 }
2673
2674 return 0;
2675}
2676
a1c7273b 2677/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2678 (ie. an undefined instruction). */
7dcc1f89 2679static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2680{
2681 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2682 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2683
2684 if ((insn & 0x0ff00f10) == 0x0e200010) {
2685 /* Multiply with Internal Accumulate Format */
2686 rd0 = (insn >> 12) & 0xf;
2687 rd1 = insn & 0xf;
2688 acc = (insn >> 5) & 7;
2689
2690 if (acc != 0)
2691 return 1;
2692
3a554c0f
FN
2693 tmp = load_reg(s, rd0);
2694 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2695 switch ((insn >> 16) & 0xf) {
2696 case 0x0: /* MIA */
3a554c0f 2697 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2698 break;
2699 case 0x8: /* MIAPH */
3a554c0f 2700 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2701 break;
2702 case 0xc: /* MIABB */
2703 case 0xd: /* MIABT */
2704 case 0xe: /* MIATB */
2705 case 0xf: /* MIATT */
18c9b560 2706 if (insn & (1 << 16))
3a554c0f 2707 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2708 if (insn & (1 << 17))
3a554c0f
FN
2709 tcg_gen_shri_i32(tmp2, tmp2, 16);
2710 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2711 break;
2712 default:
2713 return 1;
2714 }
7d1b0095
PM
2715 tcg_temp_free_i32(tmp2);
2716 tcg_temp_free_i32(tmp);
18c9b560
AZ
2717
2718 gen_op_iwmmxt_movq_wRn_M0(acc);
2719 return 0;
2720 }
2721
2722 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2723 /* Internal Accumulator Access Format */
2724 rdhi = (insn >> 16) & 0xf;
2725 rdlo = (insn >> 12) & 0xf;
2726 acc = insn & 7;
2727
2728 if (acc != 0)
2729 return 1;
2730
2731 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2732 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2733 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2734 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2735 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2736 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2737 } else { /* MAR */
3a554c0f
FN
2738 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2739 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2740 }
2741 return 0;
2742 }
2743
2744 return 1;
2745}
2746
/* VFP register field extraction.
 *
 * A VFP single-precision register number is 5 bits, split across the
 * instruction as a 4-bit field ('bigbit') plus one extra bit
 * ('smallbit').  For single-precision registers the extra bit is the
 * least significant bit of the register number; for double-precision
 * registers it is the most significant bit, and it is only valid when
 * VFP3 (32 double registers) is implemented — otherwise a set
 * 'smallbit' makes the instruction UNDEFINED, so VFP_DREG does
 * 'return 1' from the enclosing translator function in that case.
 *
 * VFP_REG_SHR shifts right by n, treating a negative n as a left
 * shift, so VFP_SREG can combine the two fields with one expression.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination, first and second source register fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2766
4373f3ce 2767/* Move between integer and VFP cores. */
39d5492a 2768static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2769{
39d5492a 2770 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2771 tcg_gen_mov_i32(tmp, cpu_F0s);
2772 return tmp;
2773}
2774
39d5492a 2775static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2776{
2777 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2778 tcg_temp_free_i32(tmp);
4373f3ce
PB
2779}
2780
39d5492a 2781static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2782{
39d5492a 2783 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2784 if (shift)
2785 tcg_gen_shri_i32(var, var, shift);
86831435 2786 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2787 tcg_gen_shli_i32(tmp, var, 8);
2788 tcg_gen_or_i32(var, var, tmp);
2789 tcg_gen_shli_i32(tmp, var, 16);
2790 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2791 tcg_temp_free_i32(tmp);
ad69471c
PB
2792}
2793
39d5492a 2794static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2795{
39d5492a 2796 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2797 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2798 tcg_gen_shli_i32(tmp, var, 16);
2799 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2800 tcg_temp_free_i32(tmp);
ad69471c
PB
2801}
2802
39d5492a 2803static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2804{
39d5492a 2805 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2806 tcg_gen_andi_i32(var, var, 0xffff0000);
2807 tcg_gen_shri_i32(tmp, var, 16);
2808 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2809 tcg_temp_free_i32(tmp);
ad69471c
PB
2810}
2811
39d5492a 2812static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2813{
2814 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2815 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2816 switch (size) {
2817 case 0:
12dcc321 2818 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2819 gen_neon_dup_u8(tmp, 0);
2820 break;
2821 case 1:
12dcc321 2822 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2823 gen_neon_dup_low16(tmp);
2824 break;
2825 case 2:
12dcc321 2826 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2827 break;
2828 default: /* Avoid compiler warnings. */
2829 abort();
2830 }
2831 return tmp;
2832}
2833
/* Emit code for a VFP VSEL instruction: Rd = cond ? Rn : Rm.
 *
 * The condition (eq/vs/ge/gt) comes from insn bits [21:20]; dp selects
 * double vs single precision.  The condition is evaluated from the
 * cached flag values cpu_ZF/cpu_NF/cpu_VF using movcond; for the
 * double-precision case the flags are first widened to 64 bits (ZF
 * zero-extended, NF/VF sign-extended so sign-based compares work).
 * Always succeeds and returns 0.
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* ZF is tested against zero, so zero-extend; NF/VF are tested
           via their sign, so sign-extend.  */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: first select on Z, then overwrite with frm
               again unless N == V.  */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: first select on Z, then overwrite with frm
               again unless N == V.  */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2942
40cfacdd
WN
2943static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2944 uint32_t rm, uint32_t dp)
2945{
2946 uint32_t vmin = extract32(insn, 6, 1);
2947 TCGv_ptr fpst = get_fpstatus_ptr(0);
2948
2949 if (dp) {
2950 TCGv_i64 frn, frm, dest;
2951
2952 frn = tcg_temp_new_i64();
2953 frm = tcg_temp_new_i64();
2954 dest = tcg_temp_new_i64();
2955
2956 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2957 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2958 if (vmin) {
f71a2ae5 2959 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2960 } else {
f71a2ae5 2961 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2962 }
2963 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2964 tcg_temp_free_i64(frn);
2965 tcg_temp_free_i64(frm);
2966 tcg_temp_free_i64(dest);
2967 } else {
2968 TCGv_i32 frn, frm, dest;
2969
2970 frn = tcg_temp_new_i32();
2971 frm = tcg_temp_new_i32();
2972 dest = tcg_temp_new_i32();
2973
2974 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2975 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2976 if (vmin) {
f71a2ae5 2977 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2978 } else {
f71a2ae5 2979 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2980 }
2981 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2982 tcg_temp_free_i32(frn);
2983 tcg_temp_free_i32(frm);
2984 tcg_temp_free_i32(dest);
2985 }
2986
2987 tcg_temp_free_ptr(fpst);
2988 return 0;
2989}
2990
7655f39b
WN
2991static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2992 int rounding)
2993{
2994 TCGv_ptr fpst = get_fpstatus_ptr(0);
2995 TCGv_i32 tcg_rmode;
2996
2997 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2998 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2999
3000 if (dp) {
3001 TCGv_i64 tcg_op;
3002 TCGv_i64 tcg_res;
3003 tcg_op = tcg_temp_new_i64();
3004 tcg_res = tcg_temp_new_i64();
3005 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3006 gen_helper_rintd(tcg_res, tcg_op, fpst);
3007 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3008 tcg_temp_free_i64(tcg_op);
3009 tcg_temp_free_i64(tcg_res);
3010 } else {
3011 TCGv_i32 tcg_op;
3012 TCGv_i32 tcg_res;
3013 tcg_op = tcg_temp_new_i32();
3014 tcg_res = tcg_temp_new_i32();
3015 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3016 gen_helper_rints(tcg_res, tcg_op, fpst);
3017 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3018 tcg_temp_free_i32(tcg_op);
3019 tcg_temp_free_i32(tcg_res);
3020 }
3021
3022 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3023 tcg_temp_free_i32(tcg_rmode);
3024
3025 tcg_temp_free_ptr(fpst);
3026 return 0;
3027}
3028
c9975a83
WN
3029static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3030 int rounding)
3031{
3032 bool is_signed = extract32(insn, 7, 1);
3033 TCGv_ptr fpst = get_fpstatus_ptr(0);
3034 TCGv_i32 tcg_rmode, tcg_shift;
3035
3036 tcg_shift = tcg_const_i32(0);
3037
3038 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3039 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3040
3041 if (dp) {
3042 TCGv_i64 tcg_double, tcg_res;
3043 TCGv_i32 tcg_tmp;
3044 /* Rd is encoded as a single precision register even when the source
3045 * is double precision.
3046 */
3047 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3048 tcg_double = tcg_temp_new_i64();
3049 tcg_res = tcg_temp_new_i64();
3050 tcg_tmp = tcg_temp_new_i32();
3051 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3052 if (is_signed) {
3053 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3054 } else {
3055 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3056 }
ecc7b3aa 3057 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3058 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3059 tcg_temp_free_i32(tcg_tmp);
3060 tcg_temp_free_i64(tcg_res);
3061 tcg_temp_free_i64(tcg_double);
3062 } else {
3063 TCGv_i32 tcg_single, tcg_res;
3064 tcg_single = tcg_temp_new_i32();
3065 tcg_res = tcg_temp_new_i32();
3066 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3067 if (is_signed) {
3068 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3069 } else {
3070 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3071 }
3072 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3073 tcg_temp_free_i32(tcg_res);
3074 tcg_temp_free_i32(tcg_single);
3075 }
3076
3077 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3078 tcg_temp_free_i32(tcg_rmode);
3079
3080 tcg_temp_free_i32(tcg_shift);
3081
3082 tcg_temp_free_ptr(fpst);
3083
3084 return 0;
3085}
7655f39b
WN
3086
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by insn bits [17:16] (callers use extract32(insn, 16, 2)).
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3097
/* Decode the VFP encodings that exist only from ARMv8 up: the T=1
 * (Thumb) / unconditional (ARM) forms VSEL, VMINNM/VMAXNM, VRINT{A,N,P,M}
 * and VCVT{A,N,P,M}.  Returns nonzero (UNDEF) for anything unrecognized
 * or when the CPU lacks ARM_FEATURE_V8.
 */
static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
{
    /* Bit [8] selects double precision (D regs) vs single (S regs). */
    uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);

    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return 1;
    }

    if (dp) {
        VFP_DREG_D(rd, insn);
        VFP_DREG_N(rn, insn);
        VFP_DREG_M(rm, insn);
    } else {
        rd = VFP_SREG_D(insn);
        rn = VFP_SREG_N(insn);
        rm = VFP_SREG_M(insn);
    }

    /* Mask/value pairs below distinguish the four v8 encoding groups. */
    if ((insn & 0x0f800e50) == 0x0e000a00) {
        return handle_vsel(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
        return handle_vminmaxnm(insn, rd, rn, rm, dp);
    } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
        /* VRINTA, VRINTN, VRINTP, VRINTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vrint(insn, rd, rm, dp, rounding);
    } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
        /* VCVTA, VCVTN, VCVTP, VCVTM */
        int rounding = fp_decode_rm[extract32(insn, 16, 2)];
        return handle_vcvt(insn, rd, rm, dp, rounding);
    }
    return 1;
}
3131
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
 * (ie. an undefined instruction).
 *
 * Covers: single/two register transfers between ARM core and VFP/Neon
 * registers (including system register moves), the VFP data-processing
 * group (with short-vector iteration for pre-v8 vector mode), and VFP
 * load/store (single and multiple).  On success the TCG ops implementing
 * the instruction have been emitted and 0 is returned.
 */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* dp: nonzero for the double-precision variants (coproc 11 vs 10). */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                /* Scalar <-> element of a D register (Neon-style access). */
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                /* size/offset pick the byte/halfword/word lane. */
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        /* Replicate across all words up to and including
                         * the addressed pass; tmp itself is stored last.
                         */
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* rd == 15: FMSTAT, only the NZCV flags. */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector (pre-v8) iteration length from FPSCR LEN field. */
            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    /* Expand the VFP immediate encoding into an IEEE value. */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into the low half of Sd, preserving the top. */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge into the top half of Sd, preserving the low. */
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        /* Second call restores the previous rounding mode. */
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
4051
0a2461fa 4052static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 4053{
6e256c93
FB
4054 TranslationBlock *tb;
4055
4056 tb = s->tb;
4057 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 4058 tcg_gen_goto_tb(n);
eaed129d 4059 gen_set_pc_im(s, dest);
8cfd0495 4060 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 4061 } else {
eaed129d 4062 gen_set_pc_im(s, dest);
57fec1fe 4063 tcg_gen_exit_tb(0);
6e256c93 4064 }
c53be334
FB
4065}
4066
8aaca4c0
FB
4067static inline void gen_jmp (DisasContext *s, uint32_t dest)
4068{
50225ad0 4069 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 4070 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4071 if (s->thumb)
d9ba4830
PB
4072 dest |= 1;
4073 gen_bx_im(s, dest);
8aaca4c0 4074 } else {
6e256c93 4075 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4076 s->is_jmp = DISAS_TB_JUMP;
4077 }
4078}
4079
39d5492a 4080static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4081{
ee097184 4082 if (x)
d9ba4830 4083 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4084 else
d9ba4830 4085 gen_sxth(t0);
ee097184 4086 if (y)
d9ba4830 4087 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4088 else
d9ba4830
PB
4089 gen_sxth(t1);
4090 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4091}
4092
4093/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4094static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4095{
b5ff1b31
FB
4096 uint32_t mask;
4097
4098 mask = 0;
4099 if (flags & (1 << 0))
4100 mask |= 0xff;
4101 if (flags & (1 << 1))
4102 mask |= 0xff00;
4103 if (flags & (1 << 2))
4104 mask |= 0xff0000;
4105 if (flags & (1 << 3))
4106 mask |= 0xff000000;
9ee6e8bb 4107
2ae23e75 4108 /* Mask out undefined bits. */
9ee6e8bb 4109 mask &= ~CPSR_RESERVED;
d614a513 4110 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4111 mask &= ~CPSR_T;
d614a513
PM
4112 }
4113 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4114 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4115 }
4116 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4117 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4118 }
4119 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4120 mask &= ~CPSR_IT;
d614a513 4121 }
4051e12c
PM
4122 /* Mask out execution state and reserved bits. */
4123 if (!spsr) {
4124 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4125 }
b5ff1b31
FB
4126 /* Mask out privileged bits. */
4127 if (IS_USER(s))
9ee6e8bb 4128 mask &= CPSR_USER;
b5ff1b31
FB
4129 return mask;
4130}
4131
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep SPSR bits outside 'mask', merge in the
         * masked new value from t0.  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* A PSR write may change mode/flags that affect translation; force a
     * TB lookup at the next PC.  */
    gen_lookup_tb(s);
    return 0;
}
4153
2fbac54b
FN
4154/* Returns nonzero if access to the PSR is not permitted. */
4155static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4156{
39d5492a 4157 TCGv_i32 tmp;
7d1b0095 4158 tmp = tcg_temp_new_i32();
2fbac54b
FN
4159 tcg_gen_movi_i32(tmp, val);
4160 return gen_set_psr(s, mask, spsr, tmp);
4161}
4162
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4307
/* Emit code for MSR (banked): copy general register rn into the banked
 * register identified by (r, sysm).  If the decode checks fail, code to
 * raise UNDEF has already been emitted and nothing more is done.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* End the TB: the helper may have changed CPU state.  */
    s->is_jmp = DISAS_UPDATE;
}
4329
/* Emit code for MRS (banked): read the banked register identified by
 * (r, sysm) into general register rn.  If the decode checks fail, code
 * to raise UNDEF has already been emitted and nothing more is done.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    /* End the TB: the helper may have changed CPU state.  */
    s->is_jmp = DISAS_UPDATE;
}
4351
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    /* Restore CPSR from SPSR; the eret helper also handles any
     * mode-change side effects.  */
    tmp = load_cpu_field(spsr);
    gen_helper_cpsr_write_eret(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_JUMP;
}
4362
/* Generate a v6 exception return.  Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    /* Write CPSR first (via the eret helper), then the new PC.  */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4371
/* Emit code for the NOP-compatible hint instructions (YIELD/WFI/WFE/
 * SEV/SEVL).  'val' is the hint number from the instruction encoding.
 * YIELD/WFI/WFE end the TB so the main loop can act on them; SEV and
 * SEVL are deliberately treated as NOPs (fall through to default).
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE.  May help SMP performance.  */
        /* fall through */
    default: /* nop */
        break;
    }
}
99c475ab 4394
/* Shorthand argument list for 2-operand wide Neon ops: dest and first
 * source are both cpu_V0, second source is cpu_V1.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4396
39d5492a 4397static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4398{
4399 switch (size) {
dd8fbd78
FN
4400 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4401 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4402 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4403 default: abort();
9ee6e8bb 4404 }
9ee6e8bb
PB
4405}
4406
39d5492a 4407static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4408{
4409 switch (size) {
dd8fbd78
FN
4410 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4411 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4412 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4413 default: return;
4414 }
4415}
4416
/* 32-bit pairwise ops end up the same as the elementwise versions,
 * because there is only one 32-bit element per D register: alias the
 * pairwise max/min helpers to the elementwise ones.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
4422
/* Dispatch a Neon integer op that needs cpu_env (e.g. saturating ops)
 * to the signed/unsigned helper for the current element size.  Relies
 * on locals 'size', 'u', 'tmp' and 'tmp2' being in scope at the
 * expansion site; an invalid (size,u) combination makes the enclosing
 * function return 1 (UNDEF).  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env.
 * Same expansion-site requirements: 'size', 'u', 'tmp', 'tmp2' in
 * scope; invalid (size,u) returns 1 (UNDEF) from the enclosing
 * function.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4468
39d5492a 4469static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4470{
39d5492a 4471 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4472 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4473 return tmp;
9ee6e8bb
PB
4474}
4475
/* Spill 'var' to Neon scratch slot 'scratch' in the CPU state.
 * Consumes (frees) var: ownership transfers to this function.  */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4481
/* Fetch the scalar operand for a Neon by-scalar operation.
 * For 16-bit scalars (size == 1) the encoding packs the lane index into
 * the high bits of 'reg'; the selected halfword is duplicated into both
 * halves of the returned 32-bit temp.  For 32-bit scalars the whole
 * register element is returned.  Caller frees the returned temp.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
4497
/* Emit a VUZP (unzip) of registers rd/rm for the given element size.
 * 'q' selects quad (1) vs double (0) register form.  Returns nonzero
 * (UNDEF) for the invalid doubleword 32-bit-element case; the actual
 * data movement is done by the unzip helpers, which take the register
 * numbers as immediates.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        /* VUZP.32 with D registers is UNDEFINED.  */
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4536
/* Emit a VZIP (zip/interleave) of registers rd/rm for the given element
 * size.  'q' selects quad (1) vs double (0) register form.  Returns
 * nonzero (UNDEF) for the invalid doubleword 32-bit-element case;
 * mirrors gen_neon_unzip() above.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        /* VZIP.32 with D registers is UNDEFINED.  */
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4575
/* Byte-wise transpose (VTRN.8) of two 32-bit values, in place:
 * bytes [b3 b2 b1 b0] of t0 and [c3 c2 c1 c0] of t1 become
 * t0 = [c2 b2 c0 b0] ... wait — concretely: even bytes of t0 pair with
 * odd positions from t1 and vice versa.  The result for t0 is built in
 * a scratch 'rd' first so t0's original value is still available when
 * computing the new t1.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 << 8) masked to odd bytes, OR even bytes of t1.  */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 >> 8) masked to even bytes, OR odd bytes of t0.
     * t0 is still unmodified at this point.  */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4597
/* Halfword transpose (VTRN.16) of two 32-bit values, in place:
 * new t0 = { low16(t1), low16(t0) }, new t1 = { high16(t1), high16(t0) }.
 * As in gen_neon_trn_u8(), the new t0 is staged in 'rd' so the original
 * t0 is still readable when t1 is rebuilt.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
/* Properties of the Neon "load/store multiple structures" encodings,
 * indexed by the op field (bits [11:8]) of the instruction:
 *   nregs      - number of D registers transferred
 *   interleave - element interleaving factor between registers
 *   spacing    - register number spacing within a structure
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4635
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Handles all three encoding groups: load/store multiple structures,
   load single element to all lanes, and load/store single element to
   one lane; plus the optional post-indexed writeback of the base
   register (rm field).  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved accesses re-derive the address for each
             * register from the base so the element stride is honoured. */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one full D-register per access.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: each D register is two 32-bit passes. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one 32-bit word. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one word.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the right lane.  */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Post-indexed writeback: rm == 13 means "update by transfer
         * size", any other register means "update by that register".  */
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4964
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* dest = (t & c) | (f & ~c), computed destructively in t and f.  */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4972
39d5492a 4973static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4974{
4975 switch (size) {
4976 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4977 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 4978 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
4979 default: abort();
4980 }
4981}
4982
39d5492a 4983static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4984{
4985 switch (size) {
02da0b2d
PM
4986 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4987 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4988 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4989 default: abort();
4990 }
4991}
4992
39d5492a 4993static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4994{
4995 switch (size) {
02da0b2d
PM
4996 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4997 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4998 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4999 default: abort();
5000 }
5001}
5002
39d5492a 5003static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5004{
5005 switch (size) {
02da0b2d
PM
5006 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5007 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5008 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5009 default: abort();
5010 }
5011}
5012
/* Element shift used on the wide value before narrowing.
 * 'q' selects rounding (1) vs truncating (0) shifts, 'u' selects
 * unsigned vs signed.  Only 16- and 32-bit element sizes are valid
 * here (the 64-bit case is handled separately by the caller).
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
5046
/* Widen each element of the 32-bit source into the 64-bit destination,
 * zero- ('u') or sign-extending.  Consumes (frees) src.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
5066
5067static inline void gen_neon_addl(int size)
5068{
5069 switch (size) {
5070 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5071 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5072 case 2: tcg_gen_add_i64(CPU_V001); break;
5073 default: abort();
5074 }
5075}
5076
5077static inline void gen_neon_subl(int size)
5078{
5079 switch (size) {
5080 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5081 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5082 case 2: tcg_gen_sub_i64(CPU_V001); break;
5083 default: abort();
5084 }
5085}
5086
/* Negate each doubled element of a long (64-bit) Neon value in place. */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        /* Single 64-bit element: a plain negate suffices.  */
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
5098
/* Saturating long add: op0 = sat(op0 + op1) per doubled element.
 * Helpers take cpu_env so they can set QC on saturation.  Note only
 * sizes 1 (32-bit) and 2 (64-bit) exist for this operation.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
5107
/* Widening multiply: dest (64-bit) = a * b with each element widened.
 * (size, u) select signed/unsigned 8-, 16- or 32-bit element forms.
 * Consumes (frees) a and b in all cases: the 32-bit paths free them via
 * gen_mul[su]_i64_i32, the 8/16-bit helper paths free them explicitly
 * below.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
5138
39d5492a
PM
5139static void gen_neon_narrow_op(int op, int u, int size,
5140 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5141{
5142 if (op) {
5143 if (u) {
5144 gen_neon_unarrow_sats(size, dest, src);
5145 } else {
5146 gen_neon_narrow(size, dest, src);
5147 }
5148 } else {
5149 if (u) {
5150 gen_neon_narrow_satu(size, dest, src);
5151 } else {
5152 gen_neon_narrow_sats(size, dest, src);
5153 }
5154 }
5155}
5156
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.  Op values not listed here are unallocated encodings;
 * they get no entry in neon_3r_sizes[] below and therefore UNDEF.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
/* Each entry in this array has bit n set if the 3-reg-same insn with
 * that op value allows size value n (otherwise it will UNDEF).  Op
 * values with no entry are zero-initialized and so always UNDEF; the
 * lookup is "neon_3r_sizes[op] & (1 << size)" in disas_neon_data_insn().
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5228
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Missing values (e.g. 3 and 29) are unallocated
 * encodings; they get no entry in neon_2rm_sizes[] and so UNDEF.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5295
5296static int neon_2rm_is_float_op(int op)
5297{
5298 /* Return true if this neon 2reg-misc op is float-to-float */
5299 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5300 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5301 op == NEON_2RM_VRINTM ||
5302 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5303 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5304}
5305
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * Indices are the NEON_2RM_* constants defined above.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5374
9ee6e8bb
PB
5375/* Translate a NEON data processing instruction. Return nonzero if the
5376 instruction is invalid.
ad69471c
PB
5377 We process data in a mixture of 32-bit and 64-bit chunks.
5378 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5379
7dcc1f89 5380static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5381{
5382 int op;
5383 int q;
5384 int rd, rn, rm;
5385 int size;
5386 int shift;
5387 int pass;
5388 int count;
5389 int pairwise;
5390 int u;
ca9a32e4 5391 uint32_t imm, mask;
39d5492a 5392 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5393 TCGv_i64 tmp64;
9ee6e8bb 5394
2c7ffc41
PM
5395 /* FIXME: this access check should not take precedence over UNDEF
5396 * for invalid encodings; we will generate incorrect syndrome information
5397 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5398 */
9dbbc748 5399 if (s->fp_excp_el) {
2c7ffc41 5400 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5401 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5402 return 0;
5403 }
5404
5df8bac1 5405 if (!s->vfp_enabled)
9ee6e8bb
PB
5406 return 1;
5407 q = (insn & (1 << 6)) != 0;
5408 u = (insn >> 24) & 1;
5409 VFP_DREG_D(rd, insn);
5410 VFP_DREG_N(rn, insn);
5411 VFP_DREG_M(rm, insn);
5412 size = (insn >> 20) & 3;
5413 if ((insn & (1 << 23)) == 0) {
5414 /* Three register same length. */
5415 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5416 /* Catch invalid op and bad size combinations: UNDEF */
5417 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5418 return 1;
5419 }
25f84f79
PM
5420 /* All insns of this form UNDEF for either this condition or the
5421 * superset of cases "Q==1"; we catch the latter later.
5422 */
5423 if (q && ((rd | rn | rm) & 1)) {
5424 return 1;
5425 }
f1ecb913
AB
5426 /*
5427 * The SHA-1/SHA-256 3-register instructions require special treatment
5428 * here, as their size field is overloaded as an op type selector, and
5429 * they all consume their input in a single pass.
5430 */
5431 if (op == NEON_3R_SHA) {
5432 if (!q) {
5433 return 1;
5434 }
5435 if (!u) { /* SHA-1 */
d614a513 5436 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5437 return 1;
5438 }
5439 tmp = tcg_const_i32(rd);
5440 tmp2 = tcg_const_i32(rn);
5441 tmp3 = tcg_const_i32(rm);
5442 tmp4 = tcg_const_i32(size);
5443 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5444 tcg_temp_free_i32(tmp4);
5445 } else { /* SHA-256 */
d614a513 5446 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5447 return 1;
5448 }
5449 tmp = tcg_const_i32(rd);
5450 tmp2 = tcg_const_i32(rn);
5451 tmp3 = tcg_const_i32(rm);
5452 switch (size) {
5453 case 0:
5454 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5455 break;
5456 case 1:
5457 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5458 break;
5459 case 2:
5460 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5461 break;
5462 }
5463 }
5464 tcg_temp_free_i32(tmp);
5465 tcg_temp_free_i32(tmp2);
5466 tcg_temp_free_i32(tmp3);
5467 return 0;
5468 }
62698be3
PM
5469 if (size == 3 && op != NEON_3R_LOGIC) {
5470 /* 64-bit element instructions. */
9ee6e8bb 5471 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5472 neon_load_reg64(cpu_V0, rn + pass);
5473 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5474 switch (op) {
62698be3 5475 case NEON_3R_VQADD:
9ee6e8bb 5476 if (u) {
02da0b2d
PM
5477 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5478 cpu_V0, cpu_V1);
2c0262af 5479 } else {
02da0b2d
PM
5480 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5481 cpu_V0, cpu_V1);
2c0262af 5482 }
9ee6e8bb 5483 break;
62698be3 5484 case NEON_3R_VQSUB:
9ee6e8bb 5485 if (u) {
02da0b2d
PM
5486 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5487 cpu_V0, cpu_V1);
ad69471c 5488 } else {
02da0b2d
PM
5489 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5490 cpu_V0, cpu_V1);
ad69471c
PB
5491 }
5492 break;
62698be3 5493 case NEON_3R_VSHL:
ad69471c
PB
5494 if (u) {
5495 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5496 } else {
5497 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5498 }
5499 break;
62698be3 5500 case NEON_3R_VQSHL:
ad69471c 5501 if (u) {
02da0b2d
PM
5502 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5503 cpu_V1, cpu_V0);
ad69471c 5504 } else {
02da0b2d
PM
5505 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5506 cpu_V1, cpu_V0);
ad69471c
PB
5507 }
5508 break;
62698be3 5509 case NEON_3R_VRSHL:
ad69471c
PB
5510 if (u) {
5511 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5512 } else {
ad69471c
PB
5513 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5514 }
5515 break;
62698be3 5516 case NEON_3R_VQRSHL:
ad69471c 5517 if (u) {
02da0b2d
PM
5518 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5519 cpu_V1, cpu_V0);
ad69471c 5520 } else {
02da0b2d
PM
5521 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5522 cpu_V1, cpu_V0);
1e8d4eec 5523 }
9ee6e8bb 5524 break;
62698be3 5525 case NEON_3R_VADD_VSUB:
9ee6e8bb 5526 if (u) {
ad69471c 5527 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5528 } else {
ad69471c 5529 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5530 }
5531 break;
5532 default:
5533 abort();
2c0262af 5534 }
ad69471c 5535 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5536 }
9ee6e8bb 5537 return 0;
2c0262af 5538 }
25f84f79 5539 pairwise = 0;
9ee6e8bb 5540 switch (op) {
62698be3
PM
5541 case NEON_3R_VSHL:
5542 case NEON_3R_VQSHL:
5543 case NEON_3R_VRSHL:
5544 case NEON_3R_VQRSHL:
9ee6e8bb 5545 {
ad69471c
PB
5546 int rtmp;
5547 /* Shift instruction operands are reversed. */
5548 rtmp = rn;
9ee6e8bb 5549 rn = rm;
ad69471c 5550 rm = rtmp;
9ee6e8bb 5551 }
2c0262af 5552 break;
25f84f79
PM
5553 case NEON_3R_VPADD:
5554 if (u) {
5555 return 1;
5556 }
5557 /* Fall through */
62698be3
PM
5558 case NEON_3R_VPMAX:
5559 case NEON_3R_VPMIN:
9ee6e8bb 5560 pairwise = 1;
2c0262af 5561 break;
25f84f79
PM
5562 case NEON_3R_FLOAT_ARITH:
5563 pairwise = (u && size < 2); /* if VPADD (float) */
5564 break;
5565 case NEON_3R_FLOAT_MINMAX:
5566 pairwise = u; /* if VPMIN/VPMAX (float) */
5567 break;
5568 case NEON_3R_FLOAT_CMP:
5569 if (!u && size) {
5570 /* no encoding for U=0 C=1x */
5571 return 1;
5572 }
5573 break;
5574 case NEON_3R_FLOAT_ACMP:
5575 if (!u) {
5576 return 1;
5577 }
5578 break;
505935fc
WN
5579 case NEON_3R_FLOAT_MISC:
5580 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5581 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5582 return 1;
5583 }
2c0262af 5584 break;
25f84f79
PM
5585 case NEON_3R_VMUL:
5586 if (u && (size != 0)) {
5587 /* UNDEF on invalid size for polynomial subcase */
5588 return 1;
5589 }
2c0262af 5590 break;
da97f52c 5591 case NEON_3R_VFM:
d614a513 5592 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5593 return 1;
5594 }
5595 break;
9ee6e8bb 5596 default:
2c0262af 5597 break;
9ee6e8bb 5598 }
dd8fbd78 5599
25f84f79
PM
5600 if (pairwise && q) {
5601 /* All the pairwise insns UNDEF if Q is set */
5602 return 1;
5603 }
5604
9ee6e8bb
PB
5605 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5606
5607 if (pairwise) {
5608 /* Pairwise. */
a5a14945
JR
5609 if (pass < 1) {
5610 tmp = neon_load_reg(rn, 0);
5611 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5612 } else {
a5a14945
JR
5613 tmp = neon_load_reg(rm, 0);
5614 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5615 }
5616 } else {
5617 /* Elementwise. */
dd8fbd78
FN
5618 tmp = neon_load_reg(rn, pass);
5619 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5620 }
5621 switch (op) {
62698be3 5622 case NEON_3R_VHADD:
9ee6e8bb
PB
5623 GEN_NEON_INTEGER_OP(hadd);
5624 break;
62698be3 5625 case NEON_3R_VQADD:
02da0b2d 5626 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5627 break;
62698be3 5628 case NEON_3R_VRHADD:
9ee6e8bb 5629 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5630 break;
62698be3 5631 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5632 switch ((u << 2) | size) {
5633 case 0: /* VAND */
dd8fbd78 5634 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5635 break;
5636 case 1: /* BIC */
f669df27 5637 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5638 break;
5639 case 2: /* VORR */
dd8fbd78 5640 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5641 break;
5642 case 3: /* VORN */
f669df27 5643 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5644 break;
5645 case 4: /* VEOR */
dd8fbd78 5646 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5647 break;
5648 case 5: /* VBSL */
dd8fbd78
FN
5649 tmp3 = neon_load_reg(rd, pass);
5650 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5651 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5652 break;
5653 case 6: /* VBIT */
dd8fbd78
FN
5654 tmp3 = neon_load_reg(rd, pass);
5655 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5656 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5657 break;
5658 case 7: /* VBIF */
dd8fbd78
FN
5659 tmp3 = neon_load_reg(rd, pass);
5660 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5661 tcg_temp_free_i32(tmp3);
9ee6e8bb 5662 break;
2c0262af
FB
5663 }
5664 break;
62698be3 5665 case NEON_3R_VHSUB:
9ee6e8bb
PB
5666 GEN_NEON_INTEGER_OP(hsub);
5667 break;
62698be3 5668 case NEON_3R_VQSUB:
02da0b2d 5669 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5670 break;
62698be3 5671 case NEON_3R_VCGT:
9ee6e8bb
PB
5672 GEN_NEON_INTEGER_OP(cgt);
5673 break;
62698be3 5674 case NEON_3R_VCGE:
9ee6e8bb
PB
5675 GEN_NEON_INTEGER_OP(cge);
5676 break;
62698be3 5677 case NEON_3R_VSHL:
ad69471c 5678 GEN_NEON_INTEGER_OP(shl);
2c0262af 5679 break;
62698be3 5680 case NEON_3R_VQSHL:
02da0b2d 5681 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5682 break;
62698be3 5683 case NEON_3R_VRSHL:
ad69471c 5684 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5685 break;
62698be3 5686 case NEON_3R_VQRSHL:
02da0b2d 5687 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5688 break;
62698be3 5689 case NEON_3R_VMAX:
9ee6e8bb
PB
5690 GEN_NEON_INTEGER_OP(max);
5691 break;
62698be3 5692 case NEON_3R_VMIN:
9ee6e8bb
PB
5693 GEN_NEON_INTEGER_OP(min);
5694 break;
62698be3 5695 case NEON_3R_VABD:
9ee6e8bb
PB
5696 GEN_NEON_INTEGER_OP(abd);
5697 break;
62698be3 5698 case NEON_3R_VABA:
9ee6e8bb 5699 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5700 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5701 tmp2 = neon_load_reg(rd, pass);
5702 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5703 break;
62698be3 5704 case NEON_3R_VADD_VSUB:
9ee6e8bb 5705 if (!u) { /* VADD */
62698be3 5706 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5707 } else { /* VSUB */
5708 switch (size) {
dd8fbd78
FN
5709 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5710 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5711 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5712 default: abort();
9ee6e8bb
PB
5713 }
5714 }
5715 break;
62698be3 5716 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5717 if (!u) { /* VTST */
5718 switch (size) {
dd8fbd78
FN
5719 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5720 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5721 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5722 default: abort();
9ee6e8bb
PB
5723 }
5724 } else { /* VCEQ */
5725 switch (size) {
dd8fbd78
FN
5726 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5727 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5728 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5729 default: abort();
9ee6e8bb
PB
5730 }
5731 }
5732 break;
62698be3 5733 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5734 switch (size) {
dd8fbd78
FN
5735 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5736 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5737 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5738 default: abort();
9ee6e8bb 5739 }
7d1b0095 5740 tcg_temp_free_i32(tmp2);
dd8fbd78 5741 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5742 if (u) { /* VMLS */
dd8fbd78 5743 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5744 } else { /* VMLA */
dd8fbd78 5745 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5746 }
5747 break;
62698be3 5748 case NEON_3R_VMUL:
9ee6e8bb 5749 if (u) { /* polynomial */
dd8fbd78 5750 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5751 } else { /* Integer */
5752 switch (size) {
dd8fbd78
FN
5753 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5754 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5755 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5756 default: abort();
9ee6e8bb
PB
5757 }
5758 }
5759 break;
62698be3 5760 case NEON_3R_VPMAX:
9ee6e8bb
PB
5761 GEN_NEON_INTEGER_OP(pmax);
5762 break;
62698be3 5763 case NEON_3R_VPMIN:
9ee6e8bb
PB
5764 GEN_NEON_INTEGER_OP(pmin);
5765 break;
62698be3 5766 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5767 if (!u) { /* VQDMULH */
5768 switch (size) {
02da0b2d
PM
5769 case 1:
5770 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5771 break;
5772 case 2:
5773 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5774 break;
62698be3 5775 default: abort();
9ee6e8bb 5776 }
62698be3 5777 } else { /* VQRDMULH */
9ee6e8bb 5778 switch (size) {
02da0b2d
PM
5779 case 1:
5780 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5781 break;
5782 case 2:
5783 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5784 break;
62698be3 5785 default: abort();
9ee6e8bb
PB
5786 }
5787 }
5788 break;
62698be3 5789 case NEON_3R_VPADD:
9ee6e8bb 5790 switch (size) {
dd8fbd78
FN
5791 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5792 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5793 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5794 default: abort();
9ee6e8bb
PB
5795 }
5796 break;
62698be3 5797 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5798 {
5799 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5800 switch ((u << 2) | size) {
5801 case 0: /* VADD */
aa47cfdd
PM
5802 case 4: /* VPADD */
5803 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5804 break;
5805 case 2: /* VSUB */
aa47cfdd 5806 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5807 break;
5808 case 6: /* VABD */
aa47cfdd 5809 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5810 break;
5811 default:
62698be3 5812 abort();
9ee6e8bb 5813 }
aa47cfdd 5814 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5815 break;
aa47cfdd 5816 }
62698be3 5817 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5818 {
5819 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5820 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5821 if (!u) {
7d1b0095 5822 tcg_temp_free_i32(tmp2);
dd8fbd78 5823 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5824 if (size == 0) {
aa47cfdd 5825 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5826 } else {
aa47cfdd 5827 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5828 }
5829 }
aa47cfdd 5830 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5831 break;
aa47cfdd 5832 }
62698be3 5833 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5834 {
5835 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5836 if (!u) {
aa47cfdd 5837 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5838 } else {
aa47cfdd
PM
5839 if (size == 0) {
5840 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5841 } else {
5842 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5843 }
b5ff1b31 5844 }
aa47cfdd 5845 tcg_temp_free_ptr(fpstatus);
2c0262af 5846 break;
aa47cfdd 5847 }
62698be3 5848 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5849 {
5850 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5851 if (size == 0) {
5852 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5853 } else {
5854 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5855 }
5856 tcg_temp_free_ptr(fpstatus);
2c0262af 5857 break;
aa47cfdd 5858 }
62698be3 5859 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5860 {
5861 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5862 if (size == 0) {
f71a2ae5 5863 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5864 } else {
f71a2ae5 5865 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5866 }
5867 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5868 break;
aa47cfdd 5869 }
505935fc
WN
5870 case NEON_3R_FLOAT_MISC:
5871 if (u) {
5872 /* VMAXNM/VMINNM */
5873 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5874 if (size == 0) {
f71a2ae5 5875 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5876 } else {
f71a2ae5 5877 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5878 }
5879 tcg_temp_free_ptr(fpstatus);
5880 } else {
5881 if (size == 0) {
5882 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5883 } else {
5884 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5885 }
5886 }
2c0262af 5887 break;
da97f52c
PM
5888 case NEON_3R_VFM:
5889 {
5890 /* VFMA, VFMS: fused multiply-add */
5891 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5892 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5893 if (size) {
5894 /* VFMS */
5895 gen_helper_vfp_negs(tmp, tmp);
5896 }
5897 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5898 tcg_temp_free_i32(tmp3);
5899 tcg_temp_free_ptr(fpstatus);
5900 break;
5901 }
9ee6e8bb
PB
5902 default:
5903 abort();
2c0262af 5904 }
7d1b0095 5905 tcg_temp_free_i32(tmp2);
dd8fbd78 5906
9ee6e8bb
PB
5907 /* Save the result. For elementwise operations we can put it
5908 straight into the destination register. For pairwise operations
5909 we have to be careful to avoid clobbering the source operands. */
5910 if (pairwise && rd == rm) {
dd8fbd78 5911 neon_store_scratch(pass, tmp);
9ee6e8bb 5912 } else {
dd8fbd78 5913 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5914 }
5915
5916 } /* for pass */
5917 if (pairwise && rd == rm) {
5918 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5919 tmp = neon_load_scratch(pass);
5920 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5921 }
5922 }
ad69471c 5923 /* End of 3 register same size operations. */
9ee6e8bb
PB
5924 } else if (insn & (1 << 4)) {
5925 if ((insn & 0x00380080) != 0) {
5926 /* Two registers and shift. */
5927 op = (insn >> 8) & 0xf;
5928 if (insn & (1 << 7)) {
cc13115b
PM
5929 /* 64-bit shift. */
5930 if (op > 7) {
5931 return 1;
5932 }
9ee6e8bb
PB
5933 size = 3;
5934 } else {
5935 size = 2;
5936 while ((insn & (1 << (size + 19))) == 0)
5937 size--;
5938 }
5939 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5940 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5941 by immediate using the variable shift operations. */
5942 if (op < 8) {
5943 /* Shift by immediate:
5944 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5945 if (q && ((rd | rm) & 1)) {
5946 return 1;
5947 }
5948 if (!u && (op == 4 || op == 6)) {
5949 return 1;
5950 }
9ee6e8bb
PB
5951 /* Right shifts are encoded as N - shift, where N is the
5952 element size in bits. */
5953 if (op <= 4)
5954 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5955 if (size == 3) {
5956 count = q + 1;
5957 } else {
5958 count = q ? 4: 2;
5959 }
5960 switch (size) {
5961 case 0:
5962 imm = (uint8_t) shift;
5963 imm |= imm << 8;
5964 imm |= imm << 16;
5965 break;
5966 case 1:
5967 imm = (uint16_t) shift;
5968 imm |= imm << 16;
5969 break;
5970 case 2:
5971 case 3:
5972 imm = shift;
5973 break;
5974 default:
5975 abort();
5976 }
5977
5978 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5979 if (size == 3) {
5980 neon_load_reg64(cpu_V0, rm + pass);
5981 tcg_gen_movi_i64(cpu_V1, imm);
5982 switch (op) {
5983 case 0: /* VSHR */
5984 case 1: /* VSRA */
5985 if (u)
5986 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5987 else
ad69471c 5988 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5989 break;
ad69471c
PB
5990 case 2: /* VRSHR */
5991 case 3: /* VRSRA */
5992 if (u)
5993 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5994 else
ad69471c 5995 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5996 break;
ad69471c 5997 case 4: /* VSRI */
ad69471c
PB
5998 case 5: /* VSHL, VSLI */
5999 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6000 break;
0322b26e 6001 case 6: /* VQSHLU */
02da0b2d
PM
6002 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6003 cpu_V0, cpu_V1);
ad69471c 6004 break;
0322b26e
PM
6005 case 7: /* VQSHL */
6006 if (u) {
02da0b2d 6007 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6008 cpu_V0, cpu_V1);
6009 } else {
02da0b2d 6010 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6011 cpu_V0, cpu_V1);
6012 }
9ee6e8bb 6013 break;
9ee6e8bb 6014 }
ad69471c
PB
6015 if (op == 1 || op == 3) {
6016 /* Accumulate. */
5371cb81 6017 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6018 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6019 } else if (op == 4 || (op == 5 && u)) {
6020 /* Insert */
923e6509
CL
6021 neon_load_reg64(cpu_V1, rd + pass);
6022 uint64_t mask;
6023 if (shift < -63 || shift > 63) {
6024 mask = 0;
6025 } else {
6026 if (op == 4) {
6027 mask = 0xffffffffffffffffull >> -shift;
6028 } else {
6029 mask = 0xffffffffffffffffull << shift;
6030 }
6031 }
6032 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6033 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6034 }
6035 neon_store_reg64(cpu_V0, rd + pass);
6036 } else { /* size < 3 */
6037 /* Operands in T0 and T1. */
dd8fbd78 6038 tmp = neon_load_reg(rm, pass);
7d1b0095 6039 tmp2 = tcg_temp_new_i32();
dd8fbd78 6040 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6041 switch (op) {
6042 case 0: /* VSHR */
6043 case 1: /* VSRA */
6044 GEN_NEON_INTEGER_OP(shl);
6045 break;
6046 case 2: /* VRSHR */
6047 case 3: /* VRSRA */
6048 GEN_NEON_INTEGER_OP(rshl);
6049 break;
6050 case 4: /* VSRI */
ad69471c
PB
6051 case 5: /* VSHL, VSLI */
6052 switch (size) {
dd8fbd78
FN
6053 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6054 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6055 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6056 default: abort();
ad69471c
PB
6057 }
6058 break;
0322b26e 6059 case 6: /* VQSHLU */
ad69471c 6060 switch (size) {
0322b26e 6061 case 0:
02da0b2d
PM
6062 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6063 tmp, tmp2);
0322b26e
PM
6064 break;
6065 case 1:
02da0b2d
PM
6066 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6067 tmp, tmp2);
0322b26e
PM
6068 break;
6069 case 2:
02da0b2d
PM
6070 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6071 tmp, tmp2);
0322b26e
PM
6072 break;
6073 default:
cc13115b 6074 abort();
ad69471c
PB
6075 }
6076 break;
0322b26e 6077 case 7: /* VQSHL */
02da0b2d 6078 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6079 break;
ad69471c 6080 }
7d1b0095 6081 tcg_temp_free_i32(tmp2);
ad69471c
PB
6082
6083 if (op == 1 || op == 3) {
6084 /* Accumulate. */
dd8fbd78 6085 tmp2 = neon_load_reg(rd, pass);
5371cb81 6086 gen_neon_add(size, tmp, tmp2);
7d1b0095 6087 tcg_temp_free_i32(tmp2);
ad69471c
PB
6088 } else if (op == 4 || (op == 5 && u)) {
6089 /* Insert */
6090 switch (size) {
6091 case 0:
6092 if (op == 4)
ca9a32e4 6093 mask = 0xff >> -shift;
ad69471c 6094 else
ca9a32e4
JR
6095 mask = (uint8_t)(0xff << shift);
6096 mask |= mask << 8;
6097 mask |= mask << 16;
ad69471c
PB
6098 break;
6099 case 1:
6100 if (op == 4)
ca9a32e4 6101 mask = 0xffff >> -shift;
ad69471c 6102 else
ca9a32e4
JR
6103 mask = (uint16_t)(0xffff << shift);
6104 mask |= mask << 16;
ad69471c
PB
6105 break;
6106 case 2:
ca9a32e4
JR
6107 if (shift < -31 || shift > 31) {
6108 mask = 0;
6109 } else {
6110 if (op == 4)
6111 mask = 0xffffffffu >> -shift;
6112 else
6113 mask = 0xffffffffu << shift;
6114 }
ad69471c
PB
6115 break;
6116 default:
6117 abort();
6118 }
dd8fbd78 6119 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6120 tcg_gen_andi_i32(tmp, tmp, mask);
6121 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6122 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6123 tcg_temp_free_i32(tmp2);
ad69471c 6124 }
dd8fbd78 6125 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6126 }
6127 } /* for pass */
6128 } else if (op < 10) {
ad69471c 6129 /* Shift by immediate and narrow:
9ee6e8bb 6130 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6131 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6132 if (rm & 1) {
6133 return 1;
6134 }
9ee6e8bb
PB
6135 shift = shift - (1 << (size + 3));
6136 size++;
92cdfaeb 6137 if (size == 3) {
a7812ae4 6138 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6139 neon_load_reg64(cpu_V0, rm);
6140 neon_load_reg64(cpu_V1, rm + 1);
6141 for (pass = 0; pass < 2; pass++) {
6142 TCGv_i64 in;
6143 if (pass == 0) {
6144 in = cpu_V0;
6145 } else {
6146 in = cpu_V1;
6147 }
ad69471c 6148 if (q) {
0b36f4cd 6149 if (input_unsigned) {
92cdfaeb 6150 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6151 } else {
92cdfaeb 6152 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6153 }
ad69471c 6154 } else {
0b36f4cd 6155 if (input_unsigned) {
92cdfaeb 6156 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6157 } else {
92cdfaeb 6158 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6159 }
ad69471c 6160 }
7d1b0095 6161 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6162 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6163 neon_store_reg(rd, pass, tmp);
6164 } /* for pass */
6165 tcg_temp_free_i64(tmp64);
6166 } else {
6167 if (size == 1) {
6168 imm = (uint16_t)shift;
6169 imm |= imm << 16;
2c0262af 6170 } else {
92cdfaeb
PM
6171 /* size == 2 */
6172 imm = (uint32_t)shift;
6173 }
6174 tmp2 = tcg_const_i32(imm);
6175 tmp4 = neon_load_reg(rm + 1, 0);
6176 tmp5 = neon_load_reg(rm + 1, 1);
6177 for (pass = 0; pass < 2; pass++) {
6178 if (pass == 0) {
6179 tmp = neon_load_reg(rm, 0);
6180 } else {
6181 tmp = tmp4;
6182 }
0b36f4cd
CL
6183 gen_neon_shift_narrow(size, tmp, tmp2, q,
6184 input_unsigned);
92cdfaeb
PM
6185 if (pass == 0) {
6186 tmp3 = neon_load_reg(rm, 1);
6187 } else {
6188 tmp3 = tmp5;
6189 }
0b36f4cd
CL
6190 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6191 input_unsigned);
36aa55dc 6192 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6193 tcg_temp_free_i32(tmp);
6194 tcg_temp_free_i32(tmp3);
6195 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6196 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6197 neon_store_reg(rd, pass, tmp);
6198 } /* for pass */
c6067f04 6199 tcg_temp_free_i32(tmp2);
b75263d6 6200 }
9ee6e8bb 6201 } else if (op == 10) {
cc13115b
PM
6202 /* VSHLL, VMOVL */
6203 if (q || (rd & 1)) {
9ee6e8bb 6204 return 1;
cc13115b 6205 }
ad69471c
PB
6206 tmp = neon_load_reg(rm, 0);
6207 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6208 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6209 if (pass == 1)
6210 tmp = tmp2;
6211
6212 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6213
9ee6e8bb
PB
6214 if (shift != 0) {
6215 /* The shift is less than the width of the source
ad69471c
PB
6216 type, so we can just shift the whole register. */
6217 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6218 /* Widen the result of shift: we need to clear
6219 * the potential overflow bits resulting from
6220 * left bits of the narrow input appearing as
6221 * right bits of the left neighbour narrow
6222 * input. */
ad69471c
PB
6223 if (size < 2 || !u) {
6224 uint64_t imm64;
6225 if (size == 0) {
6226 imm = (0xffu >> (8 - shift));
6227 imm |= imm << 16;
acdf01ef 6228 } else if (size == 1) {
ad69471c 6229 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6230 } else {
6231 /* size == 2 */
6232 imm = 0xffffffff >> (32 - shift);
6233 }
6234 if (size < 2) {
6235 imm64 = imm | (((uint64_t)imm) << 32);
6236 } else {
6237 imm64 = imm;
9ee6e8bb 6238 }
acdf01ef 6239 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6240 }
6241 }
ad69471c 6242 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6243 }
f73534a5 6244 } else if (op >= 14) {
9ee6e8bb 6245 /* VCVT fixed-point. */
cc13115b
PM
6246 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6247 return 1;
6248 }
f73534a5
PM
6249 /* We have already masked out the must-be-1 top bit of imm6,
6250 * hence this 32-shift where the ARM ARM has 64-imm6.
6251 */
6252 shift = 32 - shift;
9ee6e8bb 6253 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6254 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6255 if (!(op & 1)) {
9ee6e8bb 6256 if (u)
5500b06c 6257 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6258 else
5500b06c 6259 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6260 } else {
6261 if (u)
5500b06c 6262 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6263 else
5500b06c 6264 gen_vfp_tosl(0, shift, 1);
2c0262af 6265 }
4373f3ce 6266 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6267 }
6268 } else {
9ee6e8bb
PB
6269 return 1;
6270 }
6271 } else { /* (insn & 0x00380080) == 0 */
6272 int invert;
7d80fee5
PM
6273 if (q && (rd & 1)) {
6274 return 1;
6275 }
9ee6e8bb
PB
6276
6277 op = (insn >> 8) & 0xf;
6278 /* One register and immediate. */
6279 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6280 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6281 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6282 * We choose to not special-case this and will behave as if a
6283 * valid constant encoding of 0 had been given.
6284 */
9ee6e8bb
PB
6285 switch (op) {
6286 case 0: case 1:
6287 /* no-op */
6288 break;
6289 case 2: case 3:
6290 imm <<= 8;
6291 break;
6292 case 4: case 5:
6293 imm <<= 16;
6294 break;
6295 case 6: case 7:
6296 imm <<= 24;
6297 break;
6298 case 8: case 9:
6299 imm |= imm << 16;
6300 break;
6301 case 10: case 11:
6302 imm = (imm << 8) | (imm << 24);
6303 break;
6304 case 12:
8e31209e 6305 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6306 break;
6307 case 13:
6308 imm = (imm << 16) | 0xffff;
6309 break;
6310 case 14:
6311 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6312 if (invert)
6313 imm = ~imm;
6314 break;
6315 case 15:
7d80fee5
PM
6316 if (invert) {
6317 return 1;
6318 }
9ee6e8bb
PB
6319 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6320 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6321 break;
6322 }
6323 if (invert)
6324 imm = ~imm;
6325
9ee6e8bb
PB
6326 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6327 if (op & 1 && op < 12) {
ad69471c 6328 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6329 if (invert) {
6330 /* The immediate value has already been inverted, so
6331 BIC becomes AND. */
ad69471c 6332 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6333 } else {
ad69471c 6334 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6335 }
9ee6e8bb 6336 } else {
ad69471c 6337 /* VMOV, VMVN. */
7d1b0095 6338 tmp = tcg_temp_new_i32();
9ee6e8bb 6339 if (op == 14 && invert) {
a5a14945 6340 int n;
ad69471c
PB
6341 uint32_t val;
6342 val = 0;
9ee6e8bb
PB
6343 for (n = 0; n < 4; n++) {
6344 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6345 val |= 0xff << (n * 8);
9ee6e8bb 6346 }
ad69471c
PB
6347 tcg_gen_movi_i32(tmp, val);
6348 } else {
6349 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6350 }
9ee6e8bb 6351 }
ad69471c 6352 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6353 }
6354 }
e4b3861d 6355 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6356 if (size != 3) {
6357 op = (insn >> 8) & 0xf;
6358 if ((insn & (1 << 6)) == 0) {
6359 /* Three registers of different lengths. */
6360 int src1_wide;
6361 int src2_wide;
6362 int prewiden;
526d0096
PM
6363 /* undefreq: bit 0 : UNDEF if size == 0
6364 * bit 1 : UNDEF if size == 1
6365 * bit 2 : UNDEF if size == 2
6366 * bit 3 : UNDEF if U == 1
6367 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6368 */
6369 int undefreq;
6370 /* prewiden, src1_wide, src2_wide, undefreq */
6371 static const int neon_3reg_wide[16][4] = {
6372 {1, 0, 0, 0}, /* VADDL */
6373 {1, 1, 0, 0}, /* VADDW */
6374 {1, 0, 0, 0}, /* VSUBL */
6375 {1, 1, 0, 0}, /* VSUBW */
6376 {0, 1, 1, 0}, /* VADDHN */
6377 {0, 0, 0, 0}, /* VABAL */
6378 {0, 1, 1, 0}, /* VSUBHN */
6379 {0, 0, 0, 0}, /* VABDL */
6380 {0, 0, 0, 0}, /* VMLAL */
526d0096 6381 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6382 {0, 0, 0, 0}, /* VMLSL */
526d0096 6383 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6384 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6385 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6386 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6387 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6388 };
6389
6390 prewiden = neon_3reg_wide[op][0];
6391 src1_wide = neon_3reg_wide[op][1];
6392 src2_wide = neon_3reg_wide[op][2];
695272dc 6393 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6394
526d0096
PM
6395 if ((undefreq & (1 << size)) ||
6396 ((undefreq & 8) && u)) {
695272dc
PM
6397 return 1;
6398 }
6399 if ((src1_wide && (rn & 1)) ||
6400 (src2_wide && (rm & 1)) ||
6401 (!src2_wide && (rd & 1))) {
ad69471c 6402 return 1;
695272dc 6403 }
ad69471c 6404
4e624eda
PM
6405 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6406 * outside the loop below as it only performs a single pass.
6407 */
6408 if (op == 14 && size == 2) {
6409 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6410
d614a513 6411 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6412 return 1;
6413 }
6414 tcg_rn = tcg_temp_new_i64();
6415 tcg_rm = tcg_temp_new_i64();
6416 tcg_rd = tcg_temp_new_i64();
6417 neon_load_reg64(tcg_rn, rn);
6418 neon_load_reg64(tcg_rm, rm);
6419 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6420 neon_store_reg64(tcg_rd, rd);
6421 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6422 neon_store_reg64(tcg_rd, rd + 1);
6423 tcg_temp_free_i64(tcg_rn);
6424 tcg_temp_free_i64(tcg_rm);
6425 tcg_temp_free_i64(tcg_rd);
6426 return 0;
6427 }
6428
9ee6e8bb
PB
6429 /* Avoid overlapping operands. Wide source operands are
6430 always aligned so will never overlap with wide
6431 destinations in problematic ways. */
8f8e3aa4 6432 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6433 tmp = neon_load_reg(rm, 1);
6434 neon_store_scratch(2, tmp);
8f8e3aa4 6435 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6436 tmp = neon_load_reg(rn, 1);
6437 neon_store_scratch(2, tmp);
9ee6e8bb 6438 }
39d5492a 6439 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6440 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6441 if (src1_wide) {
6442 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6443 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6444 } else {
ad69471c 6445 if (pass == 1 && rd == rn) {
dd8fbd78 6446 tmp = neon_load_scratch(2);
9ee6e8bb 6447 } else {
ad69471c
PB
6448 tmp = neon_load_reg(rn, pass);
6449 }
6450 if (prewiden) {
6451 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6452 }
6453 }
ad69471c
PB
6454 if (src2_wide) {
6455 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6456 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6457 } else {
ad69471c 6458 if (pass == 1 && rd == rm) {
dd8fbd78 6459 tmp2 = neon_load_scratch(2);
9ee6e8bb 6460 } else {
ad69471c
PB
6461 tmp2 = neon_load_reg(rm, pass);
6462 }
6463 if (prewiden) {
6464 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6465 }
9ee6e8bb
PB
6466 }
6467 switch (op) {
6468 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6469 gen_neon_addl(size);
9ee6e8bb 6470 break;
79b0e534 6471 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6472 gen_neon_subl(size);
9ee6e8bb
PB
6473 break;
6474 case 5: case 7: /* VABAL, VABDL */
6475 switch ((size << 1) | u) {
ad69471c
PB
6476 case 0:
6477 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6478 break;
6479 case 1:
6480 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6481 break;
6482 case 2:
6483 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6484 break;
6485 case 3:
6486 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6487 break;
6488 case 4:
6489 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6490 break;
6491 case 5:
6492 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6493 break;
9ee6e8bb
PB
6494 default: abort();
6495 }
7d1b0095
PM
6496 tcg_temp_free_i32(tmp2);
6497 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6498 break;
6499 case 8: case 9: case 10: case 11: case 12: case 13:
6500 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6501 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6502 break;
6503 case 14: /* Polynomial VMULL */
e5ca24cb 6504 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6505 tcg_temp_free_i32(tmp2);
6506 tcg_temp_free_i32(tmp);
e5ca24cb 6507 break;
695272dc
PM
6508 default: /* 15 is RESERVED: caught earlier */
6509 abort();
9ee6e8bb 6510 }
ebcd88ce
PM
6511 if (op == 13) {
6512 /* VQDMULL */
6513 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6514 neon_store_reg64(cpu_V0, rd + pass);
6515 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6516 /* Accumulate. */
ebcd88ce 6517 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6518 switch (op) {
4dc064e6
PM
6519 case 10: /* VMLSL */
6520 gen_neon_negl(cpu_V0, size);
6521 /* Fall through */
6522 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6523 gen_neon_addl(size);
9ee6e8bb
PB
6524 break;
6525 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6526 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6527 if (op == 11) {
6528 gen_neon_negl(cpu_V0, size);
6529 }
ad69471c
PB
6530 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6531 break;
9ee6e8bb
PB
6532 default:
6533 abort();
6534 }
ad69471c 6535 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6536 } else if (op == 4 || op == 6) {
6537 /* Narrowing operation. */
7d1b0095 6538 tmp = tcg_temp_new_i32();
79b0e534 6539 if (!u) {
9ee6e8bb 6540 switch (size) {
ad69471c
PB
6541 case 0:
6542 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6543 break;
6544 case 1:
6545 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6546 break;
6547 case 2:
6548 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6549 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6550 break;
9ee6e8bb
PB
6551 default: abort();
6552 }
6553 } else {
6554 switch (size) {
ad69471c
PB
6555 case 0:
6556 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6557 break;
6558 case 1:
6559 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6560 break;
6561 case 2:
6562 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6563 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6564 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6565 break;
9ee6e8bb
PB
6566 default: abort();
6567 }
6568 }
ad69471c
PB
6569 if (pass == 0) {
6570 tmp3 = tmp;
6571 } else {
6572 neon_store_reg(rd, 0, tmp3);
6573 neon_store_reg(rd, 1, tmp);
6574 }
9ee6e8bb
PB
6575 } else {
6576 /* Write back the result. */
ad69471c 6577 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6578 }
6579 }
6580 } else {
3e3326df
PM
6581 /* Two registers and a scalar. NB that for ops of this form
6582 * the ARM ARM labels bit 24 as Q, but it is in our variable
6583 * 'u', not 'q'.
6584 */
6585 if (size == 0) {
6586 return 1;
6587 }
9ee6e8bb 6588 switch (op) {
9ee6e8bb 6589 case 1: /* Float VMLA scalar */
9ee6e8bb 6590 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6591 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6592 if (size == 1) {
6593 return 1;
6594 }
6595 /* fall through */
6596 case 0: /* Integer VMLA scalar */
6597 case 4: /* Integer VMLS scalar */
6598 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6599 case 12: /* VQDMULH scalar */
6600 case 13: /* VQRDMULH scalar */
3e3326df
PM
6601 if (u && ((rd | rn) & 1)) {
6602 return 1;
6603 }
dd8fbd78
FN
6604 tmp = neon_get_scalar(size, rm);
6605 neon_store_scratch(0, tmp);
9ee6e8bb 6606 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6607 tmp = neon_load_scratch(0);
6608 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6609 if (op == 12) {
6610 if (size == 1) {
02da0b2d 6611 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6612 } else {
02da0b2d 6613 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6614 }
6615 } else if (op == 13) {
6616 if (size == 1) {
02da0b2d 6617 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6618 } else {
02da0b2d 6619 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6620 }
6621 } else if (op & 1) {
aa47cfdd
PM
6622 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6623 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6624 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6625 } else {
6626 switch (size) {
dd8fbd78
FN
6627 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6628 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6629 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6630 default: abort();
9ee6e8bb
PB
6631 }
6632 }
7d1b0095 6633 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6634 if (op < 8) {
6635 /* Accumulate. */
dd8fbd78 6636 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6637 switch (op) {
6638 case 0:
dd8fbd78 6639 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6640 break;
6641 case 1:
aa47cfdd
PM
6642 {
6643 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6644 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6645 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6646 break;
aa47cfdd 6647 }
9ee6e8bb 6648 case 4:
dd8fbd78 6649 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6650 break;
6651 case 5:
aa47cfdd
PM
6652 {
6653 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6654 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6655 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6656 break;
aa47cfdd 6657 }
9ee6e8bb
PB
6658 default:
6659 abort();
6660 }
7d1b0095 6661 tcg_temp_free_i32(tmp2);
9ee6e8bb 6662 }
dd8fbd78 6663 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6664 }
6665 break;
9ee6e8bb 6666 case 3: /* VQDMLAL scalar */
9ee6e8bb 6667 case 7: /* VQDMLSL scalar */
9ee6e8bb 6668 case 11: /* VQDMULL scalar */
3e3326df 6669 if (u == 1) {
ad69471c 6670 return 1;
3e3326df
PM
6671 }
6672 /* fall through */
6673 case 2: /* VMLAL scalar */
6674 case 6: /* VMLSL scalar */
6675 case 10: /* VMULL scalar */
6676 if (rd & 1) {
6677 return 1;
6678 }
dd8fbd78 6679 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6680 /* We need a copy of tmp2 because gen_neon_mull
6681 * deletes it during pass 0. */
7d1b0095 6682 tmp4 = tcg_temp_new_i32();
c6067f04 6683 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6684 tmp3 = neon_load_reg(rn, 1);
ad69471c 6685
9ee6e8bb 6686 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6687 if (pass == 0) {
6688 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6689 } else {
dd8fbd78 6690 tmp = tmp3;
c6067f04 6691 tmp2 = tmp4;
9ee6e8bb 6692 }
ad69471c 6693 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6694 if (op != 11) {
6695 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6696 }
9ee6e8bb 6697 switch (op) {
4dc064e6
PM
6698 case 6:
6699 gen_neon_negl(cpu_V0, size);
6700 /* Fall through */
6701 case 2:
ad69471c 6702 gen_neon_addl(size);
9ee6e8bb
PB
6703 break;
6704 case 3: case 7:
ad69471c 6705 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6706 if (op == 7) {
6707 gen_neon_negl(cpu_V0, size);
6708 }
ad69471c 6709 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6710 break;
6711 case 10:
6712 /* no-op */
6713 break;
6714 case 11:
ad69471c 6715 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6716 break;
6717 default:
6718 abort();
6719 }
ad69471c 6720 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6721 }
dd8fbd78 6722
dd8fbd78 6723
9ee6e8bb
PB
6724 break;
6725 default: /* 14 and 15 are RESERVED */
6726 return 1;
6727 }
6728 }
6729 } else { /* size == 3 */
6730 if (!u) {
6731 /* Extract. */
9ee6e8bb 6732 imm = (insn >> 8) & 0xf;
ad69471c
PB
6733
6734 if (imm > 7 && !q)
6735 return 1;
6736
52579ea1
PM
6737 if (q && ((rd | rn | rm) & 1)) {
6738 return 1;
6739 }
6740
ad69471c
PB
6741 if (imm == 0) {
6742 neon_load_reg64(cpu_V0, rn);
6743 if (q) {
6744 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6745 }
ad69471c
PB
6746 } else if (imm == 8) {
6747 neon_load_reg64(cpu_V0, rn + 1);
6748 if (q) {
6749 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6750 }
ad69471c 6751 } else if (q) {
a7812ae4 6752 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6753 if (imm < 8) {
6754 neon_load_reg64(cpu_V0, rn);
a7812ae4 6755 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6756 } else {
6757 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6758 neon_load_reg64(tmp64, rm);
ad69471c
PB
6759 }
6760 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6761 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6762 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6763 if (imm < 8) {
6764 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6765 } else {
ad69471c
PB
6766 neon_load_reg64(cpu_V1, rm + 1);
6767 imm -= 8;
9ee6e8bb 6768 }
ad69471c 6769 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6770 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6771 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6772 tcg_temp_free_i64(tmp64);
ad69471c 6773 } else {
a7812ae4 6774 /* BUGFIX */
ad69471c 6775 neon_load_reg64(cpu_V0, rn);
a7812ae4 6776 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6777 neon_load_reg64(cpu_V1, rm);
a7812ae4 6778 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6779 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6780 }
6781 neon_store_reg64(cpu_V0, rd);
6782 if (q) {
6783 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6784 }
6785 } else if ((insn & (1 << 11)) == 0) {
6786 /* Two register misc. */
6787 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6788 size = (insn >> 18) & 3;
600b828c
PM
6789 /* UNDEF for unknown op values and bad op-size combinations */
6790 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6791 return 1;
6792 }
fc2a9b37
PM
6793 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6794 q && ((rm | rd) & 1)) {
6795 return 1;
6796 }
9ee6e8bb 6797 switch (op) {
600b828c 6798 case NEON_2RM_VREV64:
9ee6e8bb 6799 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6800 tmp = neon_load_reg(rm, pass * 2);
6801 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6802 switch (size) {
dd8fbd78
FN
6803 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6804 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6805 case 2: /* no-op */ break;
6806 default: abort();
6807 }
dd8fbd78 6808 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6809 if (size == 2) {
dd8fbd78 6810 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6811 } else {
9ee6e8bb 6812 switch (size) {
dd8fbd78
FN
6813 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6814 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6815 default: abort();
6816 }
dd8fbd78 6817 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6818 }
6819 }
6820 break;
600b828c
PM
6821 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6822 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6823 for (pass = 0; pass < q + 1; pass++) {
6824 tmp = neon_load_reg(rm, pass * 2);
6825 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6826 tmp = neon_load_reg(rm, pass * 2 + 1);
6827 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6828 switch (size) {
6829 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6830 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6831 case 2: tcg_gen_add_i64(CPU_V001); break;
6832 default: abort();
6833 }
600b828c 6834 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6835 /* Accumulate. */
ad69471c
PB
6836 neon_load_reg64(cpu_V1, rd + pass);
6837 gen_neon_addl(size);
9ee6e8bb 6838 }
ad69471c 6839 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6840 }
6841 break;
600b828c 6842 case NEON_2RM_VTRN:
9ee6e8bb 6843 if (size == 2) {
a5a14945 6844 int n;
9ee6e8bb 6845 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6846 tmp = neon_load_reg(rm, n);
6847 tmp2 = neon_load_reg(rd, n + 1);
6848 neon_store_reg(rm, n, tmp2);
6849 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6850 }
6851 } else {
6852 goto elementwise;
6853 }
6854 break;
600b828c 6855 case NEON_2RM_VUZP:
02acedf9 6856 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6857 return 1;
9ee6e8bb
PB
6858 }
6859 break;
600b828c 6860 case NEON_2RM_VZIP:
d68a6f3a 6861 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6862 return 1;
9ee6e8bb
PB
6863 }
6864 break;
600b828c
PM
6865 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6866 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6867 if (rm & 1) {
6868 return 1;
6869 }
39d5492a 6870 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6871 for (pass = 0; pass < 2; pass++) {
ad69471c 6872 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6873 tmp = tcg_temp_new_i32();
600b828c
PM
6874 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6875 tmp, cpu_V0);
ad69471c
PB
6876 if (pass == 0) {
6877 tmp2 = tmp;
6878 } else {
6879 neon_store_reg(rd, 0, tmp2);
6880 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6881 }
9ee6e8bb
PB
6882 }
6883 break;
600b828c 6884 case NEON_2RM_VSHLL:
fc2a9b37 6885 if (q || (rd & 1)) {
9ee6e8bb 6886 return 1;
600b828c 6887 }
ad69471c
PB
6888 tmp = neon_load_reg(rm, 0);
6889 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6890 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6891 if (pass == 1)
6892 tmp = tmp2;
6893 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6894 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6895 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6896 }
6897 break;
600b828c 6898 case NEON_2RM_VCVT_F16_F32:
d614a513 6899 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6900 q || (rm & 1)) {
6901 return 1;
6902 }
7d1b0095
PM
6903 tmp = tcg_temp_new_i32();
6904 tmp2 = tcg_temp_new_i32();
60011498 6905 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6906 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6907 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6908 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6909 tcg_gen_shli_i32(tmp2, tmp2, 16);
6910 tcg_gen_or_i32(tmp2, tmp2, tmp);
6911 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6912 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6913 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6914 neon_store_reg(rd, 0, tmp2);
7d1b0095 6915 tmp2 = tcg_temp_new_i32();
2d981da7 6916 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6917 tcg_gen_shli_i32(tmp2, tmp2, 16);
6918 tcg_gen_or_i32(tmp2, tmp2, tmp);
6919 neon_store_reg(rd, 1, tmp2);
7d1b0095 6920 tcg_temp_free_i32(tmp);
60011498 6921 break;
600b828c 6922 case NEON_2RM_VCVT_F32_F16:
d614a513 6923 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6924 q || (rd & 1)) {
6925 return 1;
6926 }
7d1b0095 6927 tmp3 = tcg_temp_new_i32();
60011498
PB
6928 tmp = neon_load_reg(rm, 0);
6929 tmp2 = neon_load_reg(rm, 1);
6930 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6931 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6932 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6933 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6934 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6935 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6936 tcg_temp_free_i32(tmp);
60011498 6937 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6938 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6939 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6940 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6941 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6942 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6943 tcg_temp_free_i32(tmp2);
6944 tcg_temp_free_i32(tmp3);
60011498 6945 break;
9d935509 6946 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6947 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6948 || ((rm | rd) & 1)) {
6949 return 1;
6950 }
6951 tmp = tcg_const_i32(rd);
6952 tmp2 = tcg_const_i32(rm);
6953
6954 /* Bit 6 is the lowest opcode bit; it distinguishes between
6955 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6956 */
6957 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6958
6959 if (op == NEON_2RM_AESE) {
6960 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6961 } else {
6962 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6963 }
6964 tcg_temp_free_i32(tmp);
6965 tcg_temp_free_i32(tmp2);
6966 tcg_temp_free_i32(tmp3);
6967 break;
f1ecb913 6968 case NEON_2RM_SHA1H:
d614a513 6969 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
6970 || ((rm | rd) & 1)) {
6971 return 1;
6972 }
6973 tmp = tcg_const_i32(rd);
6974 tmp2 = tcg_const_i32(rm);
6975
6976 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6977
6978 tcg_temp_free_i32(tmp);
6979 tcg_temp_free_i32(tmp2);
6980 break;
6981 case NEON_2RM_SHA1SU1:
6982 if ((rm | rd) & 1) {
6983 return 1;
6984 }
6985 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6986 if (q) {
d614a513 6987 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
6988 return 1;
6989 }
d614a513 6990 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
6991 return 1;
6992 }
6993 tmp = tcg_const_i32(rd);
6994 tmp2 = tcg_const_i32(rm);
6995 if (q) {
6996 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6997 } else {
6998 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6999 }
7000 tcg_temp_free_i32(tmp);
7001 tcg_temp_free_i32(tmp2);
7002 break;
9ee6e8bb
PB
7003 default:
7004 elementwise:
7005 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7006 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7007 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7008 neon_reg_offset(rm, pass));
39d5492a 7009 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7010 } else {
dd8fbd78 7011 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7012 }
7013 switch (op) {
600b828c 7014 case NEON_2RM_VREV32:
9ee6e8bb 7015 switch (size) {
dd8fbd78
FN
7016 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7017 case 1: gen_swap_half(tmp); break;
600b828c 7018 default: abort();
9ee6e8bb
PB
7019 }
7020 break;
600b828c 7021 case NEON_2RM_VREV16:
dd8fbd78 7022 gen_rev16(tmp);
9ee6e8bb 7023 break;
600b828c 7024 case NEON_2RM_VCLS:
9ee6e8bb 7025 switch (size) {
dd8fbd78
FN
7026 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7027 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7028 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7029 default: abort();
9ee6e8bb
PB
7030 }
7031 break;
600b828c 7032 case NEON_2RM_VCLZ:
9ee6e8bb 7033 switch (size) {
dd8fbd78
FN
7034 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7035 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7036 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 7037 default: abort();
9ee6e8bb
PB
7038 }
7039 break;
600b828c 7040 case NEON_2RM_VCNT:
dd8fbd78 7041 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7042 break;
600b828c 7043 case NEON_2RM_VMVN:
dd8fbd78 7044 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7045 break;
600b828c 7046 case NEON_2RM_VQABS:
9ee6e8bb 7047 switch (size) {
02da0b2d
PM
7048 case 0:
7049 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7050 break;
7051 case 1:
7052 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7053 break;
7054 case 2:
7055 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7056 break;
600b828c 7057 default: abort();
9ee6e8bb
PB
7058 }
7059 break;
600b828c 7060 case NEON_2RM_VQNEG:
9ee6e8bb 7061 switch (size) {
02da0b2d
PM
7062 case 0:
7063 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7064 break;
7065 case 1:
7066 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7067 break;
7068 case 2:
7069 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7070 break;
600b828c 7071 default: abort();
9ee6e8bb
PB
7072 }
7073 break;
600b828c 7074 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7075 tmp2 = tcg_const_i32(0);
9ee6e8bb 7076 switch(size) {
dd8fbd78
FN
7077 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7078 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7079 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7080 default: abort();
9ee6e8bb 7081 }
39d5492a 7082 tcg_temp_free_i32(tmp2);
600b828c 7083 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7084 tcg_gen_not_i32(tmp, tmp);
600b828c 7085 }
9ee6e8bb 7086 break;
600b828c 7087 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7088 tmp2 = tcg_const_i32(0);
9ee6e8bb 7089 switch(size) {
dd8fbd78
FN
7090 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7091 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7092 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7093 default: abort();
9ee6e8bb 7094 }
39d5492a 7095 tcg_temp_free_i32(tmp2);
600b828c 7096 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7097 tcg_gen_not_i32(tmp, tmp);
600b828c 7098 }
9ee6e8bb 7099 break;
600b828c 7100 case NEON_2RM_VCEQ0:
dd8fbd78 7101 tmp2 = tcg_const_i32(0);
9ee6e8bb 7102 switch(size) {
dd8fbd78
FN
7103 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7104 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7105 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7106 default: abort();
9ee6e8bb 7107 }
39d5492a 7108 tcg_temp_free_i32(tmp2);
9ee6e8bb 7109 break;
600b828c 7110 case NEON_2RM_VABS:
9ee6e8bb 7111 switch(size) {
dd8fbd78
FN
7112 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7113 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7114 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7115 default: abort();
9ee6e8bb
PB
7116 }
7117 break;
600b828c 7118 case NEON_2RM_VNEG:
dd8fbd78
FN
7119 tmp2 = tcg_const_i32(0);
7120 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7121 tcg_temp_free_i32(tmp2);
9ee6e8bb 7122 break;
600b828c 7123 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7124 {
7125 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7126 tmp2 = tcg_const_i32(0);
aa47cfdd 7127 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7128 tcg_temp_free_i32(tmp2);
aa47cfdd 7129 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7130 break;
aa47cfdd 7131 }
600b828c 7132 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7133 {
7134 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7135 tmp2 = tcg_const_i32(0);
aa47cfdd 7136 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7137 tcg_temp_free_i32(tmp2);
aa47cfdd 7138 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7139 break;
aa47cfdd 7140 }
600b828c 7141 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7142 {
7143 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7144 tmp2 = tcg_const_i32(0);
aa47cfdd 7145 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7146 tcg_temp_free_i32(tmp2);
aa47cfdd 7147 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7148 break;
aa47cfdd 7149 }
600b828c 7150 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7151 {
7152 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7153 tmp2 = tcg_const_i32(0);
aa47cfdd 7154 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7155 tcg_temp_free_i32(tmp2);
aa47cfdd 7156 tcg_temp_free_ptr(fpstatus);
0e326109 7157 break;
aa47cfdd 7158 }
600b828c 7159 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7160 {
7161 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7162 tmp2 = tcg_const_i32(0);
aa47cfdd 7163 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7164 tcg_temp_free_i32(tmp2);
aa47cfdd 7165 tcg_temp_free_ptr(fpstatus);
0e326109 7166 break;
aa47cfdd 7167 }
600b828c 7168 case NEON_2RM_VABS_F:
4373f3ce 7169 gen_vfp_abs(0);
9ee6e8bb 7170 break;
600b828c 7171 case NEON_2RM_VNEG_F:
4373f3ce 7172 gen_vfp_neg(0);
9ee6e8bb 7173 break;
600b828c 7174 case NEON_2RM_VSWP:
dd8fbd78
FN
7175 tmp2 = neon_load_reg(rd, pass);
7176 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7177 break;
600b828c 7178 case NEON_2RM_VTRN:
dd8fbd78 7179 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7180 switch (size) {
dd8fbd78
FN
7181 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7182 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7183 default: abort();
9ee6e8bb 7184 }
dd8fbd78 7185 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7186 break;
34f7b0a2
WN
7187 case NEON_2RM_VRINTN:
7188 case NEON_2RM_VRINTA:
7189 case NEON_2RM_VRINTM:
7190 case NEON_2RM_VRINTP:
7191 case NEON_2RM_VRINTZ:
7192 {
7193 TCGv_i32 tcg_rmode;
7194 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7195 int rmode;
7196
7197 if (op == NEON_2RM_VRINTZ) {
7198 rmode = FPROUNDING_ZERO;
7199 } else {
7200 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7201 }
7202
7203 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7204 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7205 cpu_env);
7206 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7207 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7208 cpu_env);
7209 tcg_temp_free_ptr(fpstatus);
7210 tcg_temp_free_i32(tcg_rmode);
7211 break;
7212 }
2ce70625
WN
7213 case NEON_2RM_VRINTX:
7214 {
7215 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7216 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7217 tcg_temp_free_ptr(fpstatus);
7218 break;
7219 }
901ad525
WN
7220 case NEON_2RM_VCVTAU:
7221 case NEON_2RM_VCVTAS:
7222 case NEON_2RM_VCVTNU:
7223 case NEON_2RM_VCVTNS:
7224 case NEON_2RM_VCVTPU:
7225 case NEON_2RM_VCVTPS:
7226 case NEON_2RM_VCVTMU:
7227 case NEON_2RM_VCVTMS:
7228 {
7229 bool is_signed = !extract32(insn, 7, 1);
7230 TCGv_ptr fpst = get_fpstatus_ptr(1);
7231 TCGv_i32 tcg_rmode, tcg_shift;
7232 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7233
7234 tcg_shift = tcg_const_i32(0);
7235 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7236 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7237 cpu_env);
7238
7239 if (is_signed) {
7240 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7241 tcg_shift, fpst);
7242 } else {
7243 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7244 tcg_shift, fpst);
7245 }
7246
7247 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7248 cpu_env);
7249 tcg_temp_free_i32(tcg_rmode);
7250 tcg_temp_free_i32(tcg_shift);
7251 tcg_temp_free_ptr(fpst);
7252 break;
7253 }
600b828c 7254 case NEON_2RM_VRECPE:
b6d4443a
AB
7255 {
7256 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7257 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7258 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7259 break;
b6d4443a 7260 }
600b828c 7261 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7262 {
7263 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7264 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7265 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7266 break;
c2fb418e 7267 }
600b828c 7268 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7269 {
7270 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7271 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7272 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7273 break;
b6d4443a 7274 }
600b828c 7275 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7276 {
7277 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7278 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7279 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7280 break;
c2fb418e 7281 }
600b828c 7282 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7283 gen_vfp_sito(0, 1);
9ee6e8bb 7284 break;
600b828c 7285 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7286 gen_vfp_uito(0, 1);
9ee6e8bb 7287 break;
600b828c 7288 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7289 gen_vfp_tosiz(0, 1);
9ee6e8bb 7290 break;
600b828c 7291 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7292 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7293 break;
7294 default:
600b828c
PM
7295 /* Reserved op values were caught by the
7296 * neon_2rm_sizes[] check earlier.
7297 */
7298 abort();
9ee6e8bb 7299 }
600b828c 7300 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7301 tcg_gen_st_f32(cpu_F0s, cpu_env,
7302 neon_reg_offset(rd, pass));
9ee6e8bb 7303 } else {
dd8fbd78 7304 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7305 }
7306 }
7307 break;
7308 }
7309 } else if ((insn & (1 << 10)) == 0) {
7310 /* VTBL, VTBX. */
56907d77
PM
7311 int n = ((insn >> 8) & 3) + 1;
7312 if ((rn + n) > 32) {
7313 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7314 * helper function running off the end of the register file.
7315 */
7316 return 1;
7317 }
7318 n <<= 3;
9ee6e8bb 7319 if (insn & (1 << 6)) {
8f8e3aa4 7320 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7321 } else {
7d1b0095 7322 tmp = tcg_temp_new_i32();
8f8e3aa4 7323 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7324 }
8f8e3aa4 7325 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7326 tmp4 = tcg_const_i32(rn);
7327 tmp5 = tcg_const_i32(n);
9ef39277 7328 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7329 tcg_temp_free_i32(tmp);
9ee6e8bb 7330 if (insn & (1 << 6)) {
8f8e3aa4 7331 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7332 } else {
7d1b0095 7333 tmp = tcg_temp_new_i32();
8f8e3aa4 7334 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7335 }
8f8e3aa4 7336 tmp3 = neon_load_reg(rm, 1);
9ef39277 7337 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7338 tcg_temp_free_i32(tmp5);
7339 tcg_temp_free_i32(tmp4);
8f8e3aa4 7340 neon_store_reg(rd, 0, tmp2);
3018f259 7341 neon_store_reg(rd, 1, tmp3);
7d1b0095 7342 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7343 } else if ((insn & 0x380) == 0) {
7344 /* VDUP */
133da6aa
JR
7345 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7346 return 1;
7347 }
9ee6e8bb 7348 if (insn & (1 << 19)) {
dd8fbd78 7349 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7350 } else {
dd8fbd78 7351 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7352 }
7353 if (insn & (1 << 16)) {
dd8fbd78 7354 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7355 } else if (insn & (1 << 17)) {
7356 if ((insn >> 18) & 1)
dd8fbd78 7357 gen_neon_dup_high16(tmp);
9ee6e8bb 7358 else
dd8fbd78 7359 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7360 }
7361 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7362 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7363 tcg_gen_mov_i32(tmp2, tmp);
7364 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7365 }
7d1b0095 7366 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7367 } else {
7368 return 1;
7369 }
7370 }
7371 }
7372 return 0;
7373}
7374
7dcc1f89 7375static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7376{
4b6a83fb
PM
7377 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7378 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7379
7380 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7381
7382 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7383 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7384 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7385 return 1;
7386 }
d614a513 7387 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7388 return disas_iwmmxt_insn(s, insn);
d614a513 7389 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7390 return disas_dsp_insn(s, insn);
c0f4af17
PM
7391 }
7392 return 1;
4b6a83fb
PM
7393 }
7394
7395 /* Otherwise treat as a generic register access */
7396 is64 = (insn & (1 << 25)) == 0;
7397 if (!is64 && ((insn & (1 << 4)) == 0)) {
7398 /* cdp */
7399 return 1;
7400 }
7401
7402 crm = insn & 0xf;
7403 if (is64) {
7404 crn = 0;
7405 opc1 = (insn >> 4) & 0xf;
7406 opc2 = 0;
7407 rt2 = (insn >> 16) & 0xf;
7408 } else {
7409 crn = (insn >> 16) & 0xf;
7410 opc1 = (insn >> 21) & 7;
7411 opc2 = (insn >> 5) & 7;
7412 rt2 = 0;
7413 }
7414 isread = (insn >> 20) & 1;
7415 rt = (insn >> 12) & 0xf;
7416
60322b39 7417 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7418 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7419 if (ri) {
7420 /* Check access permissions */
dcbff19b 7421 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7422 return 1;
7423 }
7424
c0f4af17 7425 if (ri->accessfn ||
d614a513 7426 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7427 /* Emit code to perform further access permissions checks at
7428 * runtime; this may result in an exception.
c0f4af17
PM
7429 * Note that on XScale all cp0..c13 registers do an access check
7430 * call in order to handle c15_cpar.
f59df3f2
PM
7431 */
7432 TCGv_ptr tmpptr;
3f208fd7 7433 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7434 uint32_t syndrome;
7435
7436 /* Note that since we are an implementation which takes an
7437 * exception on a trapped conditional instruction only if the
7438 * instruction passes its condition code check, we can take
7439 * advantage of the clause in the ARM ARM that allows us to set
7440 * the COND field in the instruction to 0xE in all cases.
7441 * We could fish the actual condition out of the insn (ARM)
7442 * or the condexec bits (Thumb) but it isn't necessary.
7443 */
7444 switch (cpnum) {
7445 case 14:
7446 if (is64) {
7447 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7448 isread, false);
8bcbf37c
PM
7449 } else {
7450 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7451 rt, isread, false);
8bcbf37c
PM
7452 }
7453 break;
7454 case 15:
7455 if (is64) {
7456 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7457 isread, false);
8bcbf37c
PM
7458 } else {
7459 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7460 rt, isread, false);
8bcbf37c
PM
7461 }
7462 break;
7463 default:
7464 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7465 * so this can only happen if this is an ARMv7 or earlier CPU,
7466 * in which case the syndrome information won't actually be
7467 * guest visible.
7468 */
d614a513 7469 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7470 syndrome = syn_uncategorized();
7471 break;
7472 }
7473
43bfa4a1 7474 gen_set_condexec(s);
3977ee5d 7475 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7476 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7477 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7478 tcg_isread = tcg_const_i32(isread);
7479 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7480 tcg_isread);
f59df3f2 7481 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7482 tcg_temp_free_i32(tcg_syn);
3f208fd7 7483 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7484 }
7485
4b6a83fb
PM
7486 /* Handle special cases first */
7487 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7488 case ARM_CP_NOP:
7489 return 0;
7490 case ARM_CP_WFI:
7491 if (isread) {
7492 return 1;
7493 }
eaed129d 7494 gen_set_pc_im(s, s->pc);
4b6a83fb 7495 s->is_jmp = DISAS_WFI;
2bee5105 7496 return 0;
4b6a83fb
PM
7497 default:
7498 break;
7499 }
7500
bd79255d 7501 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7502 gen_io_start();
7503 }
7504
4b6a83fb
PM
7505 if (isread) {
7506 /* Read */
7507 if (is64) {
7508 TCGv_i64 tmp64;
7509 TCGv_i32 tmp;
7510 if (ri->type & ARM_CP_CONST) {
7511 tmp64 = tcg_const_i64(ri->resetvalue);
7512 } else if (ri->readfn) {
7513 TCGv_ptr tmpptr;
4b6a83fb
PM
7514 tmp64 = tcg_temp_new_i64();
7515 tmpptr = tcg_const_ptr(ri);
7516 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7517 tcg_temp_free_ptr(tmpptr);
7518 } else {
7519 tmp64 = tcg_temp_new_i64();
7520 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7521 }
7522 tmp = tcg_temp_new_i32();
ecc7b3aa 7523 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7524 store_reg(s, rt, tmp);
7525 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7526 tmp = tcg_temp_new_i32();
ecc7b3aa 7527 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7528 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7529 store_reg(s, rt2, tmp);
7530 } else {
39d5492a 7531 TCGv_i32 tmp;
4b6a83fb
PM
7532 if (ri->type & ARM_CP_CONST) {
7533 tmp = tcg_const_i32(ri->resetvalue);
7534 } else if (ri->readfn) {
7535 TCGv_ptr tmpptr;
4b6a83fb
PM
7536 tmp = tcg_temp_new_i32();
7537 tmpptr = tcg_const_ptr(ri);
7538 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7539 tcg_temp_free_ptr(tmpptr);
7540 } else {
7541 tmp = load_cpu_offset(ri->fieldoffset);
7542 }
7543 if (rt == 15) {
7544 /* Destination register of r15 for 32 bit loads sets
7545 * the condition codes from the high 4 bits of the value
7546 */
7547 gen_set_nzcv(tmp);
7548 tcg_temp_free_i32(tmp);
7549 } else {
7550 store_reg(s, rt, tmp);
7551 }
7552 }
7553 } else {
7554 /* Write */
7555 if (ri->type & ARM_CP_CONST) {
7556 /* If not forbidden by access permissions, treat as WI */
7557 return 0;
7558 }
7559
7560 if (is64) {
39d5492a 7561 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7562 TCGv_i64 tmp64 = tcg_temp_new_i64();
7563 tmplo = load_reg(s, rt);
7564 tmphi = load_reg(s, rt2);
7565 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7566 tcg_temp_free_i32(tmplo);
7567 tcg_temp_free_i32(tmphi);
7568 if (ri->writefn) {
7569 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7570 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7571 tcg_temp_free_ptr(tmpptr);
7572 } else {
7573 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7574 }
7575 tcg_temp_free_i64(tmp64);
7576 } else {
7577 if (ri->writefn) {
39d5492a 7578 TCGv_i32 tmp;
4b6a83fb 7579 TCGv_ptr tmpptr;
4b6a83fb
PM
7580 tmp = load_reg(s, rt);
7581 tmpptr = tcg_const_ptr(ri);
7582 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7583 tcg_temp_free_ptr(tmpptr);
7584 tcg_temp_free_i32(tmp);
7585 } else {
39d5492a 7586 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7587 store_cpu_offset(tmp, ri->fieldoffset);
7588 }
7589 }
2452731c
PM
7590 }
7591
bd79255d 7592 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7593 /* I/O operations must end the TB here (whether read or write) */
7594 gen_io_end();
7595 gen_lookup_tb(s);
7596 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7597 /* We default to ending the TB on a coprocessor register write,
7598 * but allow this to be suppressed by the register definition
7599 * (usually only necessary to work around guest bugs).
7600 */
2452731c 7601 gen_lookup_tb(s);
4b6a83fb 7602 }
2452731c 7603
4b6a83fb
PM
7604 return 0;
7605 }
7606
626187d8
PM
7607 /* Unknown register; this might be a guest error or a QEMU
7608 * unimplemented feature.
7609 */
7610 if (is64) {
7611 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7612 "64 bit system register cp:%d opc1: %d crm:%d "
7613 "(%s)\n",
7614 isread ? "read" : "write", cpnum, opc1, crm,
7615 s->ns ? "non-secure" : "secure");
626187d8
PM
7616 } else {
7617 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7618 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7619 "(%s)\n",
7620 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7621 s->ns ? "non-secure" : "secure");
626187d8
PM
7622 }
7623
4a9a539f 7624 return 1;
9ee6e8bb
PB
7625}
7626
5e3f878a
PB
7627
7628/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7629static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7630{
39d5492a 7631 TCGv_i32 tmp;
7d1b0095 7632 tmp = tcg_temp_new_i32();
ecc7b3aa 7633 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7634 store_reg(s, rlow, tmp);
7d1b0095 7635 tmp = tcg_temp_new_i32();
5e3f878a 7636 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7637 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7638 store_reg(s, rhigh, tmp);
7639}
7640
7641/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7642static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7643{
a7812ae4 7644 TCGv_i64 tmp;
39d5492a 7645 TCGv_i32 tmp2;
5e3f878a 7646
36aa55dc 7647 /* Load value and extend to 64 bits. */
a7812ae4 7648 tmp = tcg_temp_new_i64();
5e3f878a
PB
7649 tmp2 = load_reg(s, rlow);
7650 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7651 tcg_temp_free_i32(tmp2);
5e3f878a 7652 tcg_gen_add_i64(val, val, tmp);
b75263d6 7653 tcg_temp_free_i64(tmp);
5e3f878a
PB
7654}
7655
7656/* load and add a 64-bit value from a register pair. */
a7812ae4 7657static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7658{
a7812ae4 7659 TCGv_i64 tmp;
39d5492a
PM
7660 TCGv_i32 tmpl;
7661 TCGv_i32 tmph;
5e3f878a
PB
7662
7663 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7664 tmpl = load_reg(s, rlow);
7665 tmph = load_reg(s, rhigh);
a7812ae4 7666 tmp = tcg_temp_new_i64();
36aa55dc 7667 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7668 tcg_temp_free_i32(tmpl);
7669 tcg_temp_free_i32(tmph);
5e3f878a 7670 tcg_gen_add_i64(val, val, tmp);
b75263d6 7671 tcg_temp_free_i64(tmp);
5e3f878a
PB
7672}
7673
c9f10124 7674/* Set N and Z flags from hi|lo. */
39d5492a 7675static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7676{
c9f10124
RH
7677 tcg_gen_mov_i32(cpu_NF, hi);
7678 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7679}
7680
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
/* Emit code for LDREX and friends: load from @addr into rt (and rt2 for
 * the doubleword form), recording the address and loaded value in
 * cpu_exclusive_addr/cpu_exclusive_val for the matching store-exclusive.
 * @size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword (register pair).
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Flag that this TB contains a load-exclusive */
    s->is_ldex = true;

    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        /* For size 3 this is only the low word; the high word follows */
        gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Load the second word from addr + 4 and record the full 64-bit
         * value in the exclusive monitor.
         */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7728
/* Emit code for CLREX: clear the exclusive monitor by setting the
 * recorded address to -1, which no real address can match, so any
 * subsequent store-exclusive will fail.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7733
#ifdef CONFIG_USER_ONLY
/* User-mode emulation: record the address and operand registers and raise
 * an internal exception; the atomic store-exclusive is completed outside
 * the translated code (per the comment above gen_load_exclusive).
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the rd/rt/rt2 register numbers for the handler */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
#else
/* System emulation: emit inline code that compares @addr and the current
 * memory contents against the values recorded by gen_load_exclusive.
 * On match the store is performed and rd is set to 0; otherwise rd is
 * set to 1.  Either way the exclusive monitor is cleared at the end.
 * @size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    /* Fail immediately if the address does not match the monitor */
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Re-load the current memory contents for comparison with the
     * value recorded at load-exclusive time.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    /* Fail if memory no longer holds the value seen by the load */
    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the actual store(s) */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Second word of the doubleword store */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Clear the monitor whether the store succeeded or not */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
7828
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap taken to EL3 (exception target level 3) */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the specified mode against the current exception level */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the first (lowest-addressed) word stored, per amode */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at the next word up */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Final SP adjustment relative to where addr now points,
         * chosen per amode so the banked r13 ends up DA/IA/DB/IB-adjusted
         * by 8 bytes overall.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    /* NOTE(review): ends the TB — presumably because the banked-r13
     * helpers may have changed CPU state; confirm against DISAS_UPDATE
     * handling in the translator main loop.
     */
    s->is_jmp = DISAS_UPDATE;
}
7955
f4df2210 7956static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7957{
f4df2210 7958 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7959 TCGv_i32 tmp;
7960 TCGv_i32 tmp2;
7961 TCGv_i32 tmp3;
7962 TCGv_i32 addr;
a7812ae4 7963 TCGv_i64 tmp64;
9ee6e8bb 7964
9ee6e8bb 7965 /* M variants do not implement ARM mode. */
b53d8923 7966 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 7967 goto illegal_op;
b53d8923 7968 }
9ee6e8bb
PB
7969 cond = insn >> 28;
7970 if (cond == 0xf){
be5e7a76
DES
7971 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7972 * choose to UNDEF. In ARMv5 and above the space is used
7973 * for miscellaneous unconditional instructions.
7974 */
7975 ARCH(5);
7976
9ee6e8bb
PB
7977 /* Unconditional instructions. */
7978 if (((insn >> 25) & 7) == 1) {
7979 /* NEON Data processing. */
d614a513 7980 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7981 goto illegal_op;
d614a513 7982 }
9ee6e8bb 7983
7dcc1f89 7984 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7985 goto illegal_op;
7dcc1f89 7986 }
9ee6e8bb
PB
7987 return;
7988 }
7989 if ((insn & 0x0f100000) == 0x04000000) {
7990 /* NEON load/store. */
d614a513 7991 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7992 goto illegal_op;
d614a513 7993 }
9ee6e8bb 7994
7dcc1f89 7995 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 7996 goto illegal_op;
7dcc1f89 7997 }
9ee6e8bb
PB
7998 return;
7999 }
6a57f3eb
WN
8000 if ((insn & 0x0f000e10) == 0x0e000a00) {
8001 /* VFP. */
7dcc1f89 8002 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8003 goto illegal_op;
8004 }
8005 return;
8006 }
3d185e5d
PM
8007 if (((insn & 0x0f30f000) == 0x0510f000) ||
8008 ((insn & 0x0f30f010) == 0x0710f000)) {
8009 if ((insn & (1 << 22)) == 0) {
8010 /* PLDW; v7MP */
d614a513 8011 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8012 goto illegal_op;
8013 }
8014 }
8015 /* Otherwise PLD; v5TE+ */
be5e7a76 8016 ARCH(5TE);
3d185e5d
PM
8017 return;
8018 }
8019 if (((insn & 0x0f70f000) == 0x0450f000) ||
8020 ((insn & 0x0f70f010) == 0x0650f000)) {
8021 ARCH(7);
8022 return; /* PLI; V7 */
8023 }
8024 if (((insn & 0x0f700000) == 0x04100000) ||
8025 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8026 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8027 goto illegal_op;
8028 }
8029 return; /* v7MP: Unallocated memory hint: must NOP */
8030 }
8031
8032 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8033 ARCH(6);
8034 /* setend */
9886ecdf
PB
8035 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8036 gen_helper_setend(cpu_env);
8037 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8038 }
8039 return;
8040 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8041 switch ((insn >> 4) & 0xf) {
8042 case 1: /* clrex */
8043 ARCH(6K);
426f5abc 8044 gen_clrex(s);
9ee6e8bb
PB
8045 return;
8046 case 4: /* dsb */
8047 case 5: /* dmb */
9ee6e8bb
PB
8048 ARCH(7);
8049 /* We don't emulate caches so these are a no-op. */
8050 return;
6df99dec
SS
8051 case 6: /* isb */
8052 /* We need to break the TB after this insn to execute
8053 * self-modifying code correctly and also to take
8054 * any pending interrupts immediately.
8055 */
8056 gen_lookup_tb(s);
8057 return;
9ee6e8bb
PB
8058 default:
8059 goto illegal_op;
8060 }
8061 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8062 /* srs */
81465888
PM
8063 ARCH(6);
8064 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8065 return;
ea825eee 8066 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8067 /* rfe */
c67b6b71 8068 int32_t offset;
9ee6e8bb
PB
8069 if (IS_USER(s))
8070 goto illegal_op;
8071 ARCH(6);
8072 rn = (insn >> 16) & 0xf;
b0109805 8073 addr = load_reg(s, rn);
9ee6e8bb
PB
8074 i = (insn >> 23) & 3;
8075 switch (i) {
b0109805 8076 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8077 case 1: offset = 0; break; /* IA */
8078 case 2: offset = -8; break; /* DB */
b0109805 8079 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8080 default: abort();
8081 }
8082 if (offset)
b0109805
PB
8083 tcg_gen_addi_i32(addr, addr, offset);
8084 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8085 tmp = tcg_temp_new_i32();
12dcc321 8086 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8087 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8088 tmp2 = tcg_temp_new_i32();
12dcc321 8089 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8090 if (insn & (1 << 21)) {
8091 /* Base writeback. */
8092 switch (i) {
b0109805 8093 case 0: offset = -8; break;
c67b6b71
FN
8094 case 1: offset = 4; break;
8095 case 2: offset = -4; break;
b0109805 8096 case 3: offset = 0; break;
9ee6e8bb
PB
8097 default: abort();
8098 }
8099 if (offset)
b0109805
PB
8100 tcg_gen_addi_i32(addr, addr, offset);
8101 store_reg(s, rn, addr);
8102 } else {
7d1b0095 8103 tcg_temp_free_i32(addr);
9ee6e8bb 8104 }
b0109805 8105 gen_rfe(s, tmp, tmp2);
c67b6b71 8106 return;
9ee6e8bb
PB
8107 } else if ((insn & 0x0e000000) == 0x0a000000) {
8108 /* branch link and change to thumb (blx <offset>) */
8109 int32_t offset;
8110
8111 val = (uint32_t)s->pc;
7d1b0095 8112 tmp = tcg_temp_new_i32();
d9ba4830
PB
8113 tcg_gen_movi_i32(tmp, val);
8114 store_reg(s, 14, tmp);
9ee6e8bb
PB
8115 /* Sign-extend the 24-bit offset */
8116 offset = (((int32_t)insn) << 8) >> 8;
8117 /* offset * 4 + bit24 * 2 + (thumb bit) */
8118 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8119 /* pipeline offset */
8120 val += 4;
be5e7a76 8121 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8122 gen_bx_im(s, val);
9ee6e8bb
PB
8123 return;
8124 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8125 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8126 /* iWMMXt register transfer. */
c0f4af17 8127 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8128 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8129 return;
c0f4af17
PM
8130 }
8131 }
9ee6e8bb
PB
8132 }
8133 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8134 /* Coprocessor double register transfer. */
be5e7a76 8135 ARCH(5TE);
9ee6e8bb
PB
8136 } else if ((insn & 0x0f000010) == 0x0e000010) {
8137 /* Additional coprocessor register transfer. */
7997d92f 8138 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8139 uint32_t mask;
8140 uint32_t val;
8141 /* cps (privileged) */
8142 if (IS_USER(s))
8143 return;
8144 mask = val = 0;
8145 if (insn & (1 << 19)) {
8146 if (insn & (1 << 8))
8147 mask |= CPSR_A;
8148 if (insn & (1 << 7))
8149 mask |= CPSR_I;
8150 if (insn & (1 << 6))
8151 mask |= CPSR_F;
8152 if (insn & (1 << 18))
8153 val |= mask;
8154 }
7997d92f 8155 if (insn & (1 << 17)) {
9ee6e8bb
PB
8156 mask |= CPSR_M;
8157 val |= (insn & 0x1f);
8158 }
8159 if (mask) {
2fbac54b 8160 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8161 }
8162 return;
8163 }
8164 goto illegal_op;
8165 }
8166 if (cond != 0xe) {
8167 /* if not always execute, we generate a conditional jump to
8168 next instruction */
8169 s->condlabel = gen_new_label();
39fb730a 8170 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8171 s->condjmp = 1;
8172 }
8173 if ((insn & 0x0f900000) == 0x03000000) {
8174 if ((insn & (1 << 21)) == 0) {
8175 ARCH(6T2);
8176 rd = (insn >> 12) & 0xf;
8177 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8178 if ((insn & (1 << 22)) == 0) {
8179 /* MOVW */
7d1b0095 8180 tmp = tcg_temp_new_i32();
5e3f878a 8181 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8182 } else {
8183 /* MOVT */
5e3f878a 8184 tmp = load_reg(s, rd);
86831435 8185 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8186 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8187 }
5e3f878a 8188 store_reg(s, rd, tmp);
9ee6e8bb
PB
8189 } else {
8190 if (((insn >> 12) & 0xf) != 0xf)
8191 goto illegal_op;
8192 if (((insn >> 16) & 0xf) == 0) {
8193 gen_nop_hint(s, insn & 0xff);
8194 } else {
8195 /* CPSR = immediate */
8196 val = insn & 0xff;
8197 shift = ((insn >> 8) & 0xf) * 2;
8198 if (shift)
8199 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8200 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8201 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8202 i, val)) {
9ee6e8bb 8203 goto illegal_op;
7dcc1f89 8204 }
9ee6e8bb
PB
8205 }
8206 }
8207 } else if ((insn & 0x0f900000) == 0x01000000
8208 && (insn & 0x00000090) != 0x00000090) {
8209 /* miscellaneous instructions */
8210 op1 = (insn >> 21) & 3;
8211 sh = (insn >> 4) & 0xf;
8212 rm = insn & 0xf;
8213 switch (sh) {
8bfd0550
PM
8214 case 0x0: /* MSR, MRS */
8215 if (insn & (1 << 9)) {
8216 /* MSR (banked) and MRS (banked) */
8217 int sysm = extract32(insn, 16, 4) |
8218 (extract32(insn, 8, 1) << 4);
8219 int r = extract32(insn, 22, 1);
8220
8221 if (op1 & 1) {
8222 /* MSR (banked) */
8223 gen_msr_banked(s, r, sysm, rm);
8224 } else {
8225 /* MRS (banked) */
8226 int rd = extract32(insn, 12, 4);
8227
8228 gen_mrs_banked(s, r, sysm, rd);
8229 }
8230 break;
8231 }
8232
8233 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8234 if (op1 & 1) {
8235 /* PSR = reg */
2fbac54b 8236 tmp = load_reg(s, rm);
9ee6e8bb 8237 i = ((op1 & 2) != 0);
7dcc1f89 8238 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8239 goto illegal_op;
8240 } else {
8241 /* reg = PSR */
8242 rd = (insn >> 12) & 0xf;
8243 if (op1 & 2) {
8244 if (IS_USER(s))
8245 goto illegal_op;
d9ba4830 8246 tmp = load_cpu_field(spsr);
9ee6e8bb 8247 } else {
7d1b0095 8248 tmp = tcg_temp_new_i32();
9ef39277 8249 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8250 }
d9ba4830 8251 store_reg(s, rd, tmp);
9ee6e8bb
PB
8252 }
8253 break;
8254 case 0x1:
8255 if (op1 == 1) {
8256 /* branch/exchange thumb (bx). */
be5e7a76 8257 ARCH(4T);
d9ba4830
PB
8258 tmp = load_reg(s, rm);
8259 gen_bx(s, tmp);
9ee6e8bb
PB
8260 } else if (op1 == 3) {
8261 /* clz */
be5e7a76 8262 ARCH(5);
9ee6e8bb 8263 rd = (insn >> 12) & 0xf;
1497c961
PB
8264 tmp = load_reg(s, rm);
8265 gen_helper_clz(tmp, tmp);
8266 store_reg(s, rd, tmp);
9ee6e8bb
PB
8267 } else {
8268 goto illegal_op;
8269 }
8270 break;
8271 case 0x2:
8272 if (op1 == 1) {
8273 ARCH(5J); /* bxj */
8274 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8275 tmp = load_reg(s, rm);
8276 gen_bx(s, tmp);
9ee6e8bb
PB
8277 } else {
8278 goto illegal_op;
8279 }
8280 break;
8281 case 0x3:
8282 if (op1 != 1)
8283 goto illegal_op;
8284
be5e7a76 8285 ARCH(5);
9ee6e8bb 8286 /* branch link/exchange thumb (blx) */
d9ba4830 8287 tmp = load_reg(s, rm);
7d1b0095 8288 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8289 tcg_gen_movi_i32(tmp2, s->pc);
8290 store_reg(s, 14, tmp2);
8291 gen_bx(s, tmp);
9ee6e8bb 8292 break;
eb0ecd5a
WN
8293 case 0x4:
8294 {
8295 /* crc32/crc32c */
8296 uint32_t c = extract32(insn, 8, 4);
8297
8298 /* Check this CPU supports ARMv8 CRC instructions.
8299 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8300 * Bits 8, 10 and 11 should be zero.
8301 */
d614a513 8302 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8303 (c & 0xd) != 0) {
8304 goto illegal_op;
8305 }
8306
8307 rn = extract32(insn, 16, 4);
8308 rd = extract32(insn, 12, 4);
8309
8310 tmp = load_reg(s, rn);
8311 tmp2 = load_reg(s, rm);
aa633469
PM
8312 if (op1 == 0) {
8313 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8314 } else if (op1 == 1) {
8315 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8316 }
eb0ecd5a
WN
8317 tmp3 = tcg_const_i32(1 << op1);
8318 if (c & 0x2) {
8319 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8320 } else {
8321 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8322 }
8323 tcg_temp_free_i32(tmp2);
8324 tcg_temp_free_i32(tmp3);
8325 store_reg(s, rd, tmp);
8326 break;
8327 }
9ee6e8bb 8328 case 0x5: /* saturating add/subtract */
be5e7a76 8329 ARCH(5TE);
9ee6e8bb
PB
8330 rd = (insn >> 12) & 0xf;
8331 rn = (insn >> 16) & 0xf;
b40d0353 8332 tmp = load_reg(s, rm);
5e3f878a 8333 tmp2 = load_reg(s, rn);
9ee6e8bb 8334 if (op1 & 2)
9ef39277 8335 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8336 if (op1 & 1)
9ef39277 8337 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8338 else
9ef39277 8339 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8340 tcg_temp_free_i32(tmp2);
5e3f878a 8341 store_reg(s, rd, tmp);
9ee6e8bb 8342 break;
49e14940 8343 case 7:
d4a2dc67
PM
8344 {
8345 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
8346 switch (op1) {
8347 case 1:
8348 /* bkpt */
8349 ARCH(5);
8350 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8351 syn_aa32_bkpt(imm16, false),
8352 default_exception_el(s));
37e6456e
PM
8353 break;
8354 case 2:
8355 /* Hypervisor call (v7) */
8356 ARCH(7);
8357 if (IS_USER(s)) {
8358 goto illegal_op;
8359 }
8360 gen_hvc(s, imm16);
8361 break;
8362 case 3:
8363 /* Secure monitor call (v6+) */
8364 ARCH(6K);
8365 if (IS_USER(s)) {
8366 goto illegal_op;
8367 }
8368 gen_smc(s);
8369 break;
8370 default:
49e14940
AL
8371 goto illegal_op;
8372 }
9ee6e8bb 8373 break;
d4a2dc67 8374 }
9ee6e8bb
PB
8375 case 0x8: /* signed multiply */
8376 case 0xa:
8377 case 0xc:
8378 case 0xe:
be5e7a76 8379 ARCH(5TE);
9ee6e8bb
PB
8380 rs = (insn >> 8) & 0xf;
8381 rn = (insn >> 12) & 0xf;
8382 rd = (insn >> 16) & 0xf;
8383 if (op1 == 1) {
8384 /* (32 * 16) >> 16 */
5e3f878a
PB
8385 tmp = load_reg(s, rm);
8386 tmp2 = load_reg(s, rs);
9ee6e8bb 8387 if (sh & 4)
5e3f878a 8388 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8389 else
5e3f878a 8390 gen_sxth(tmp2);
a7812ae4
PB
8391 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8392 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8393 tmp = tcg_temp_new_i32();
ecc7b3aa 8394 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8395 tcg_temp_free_i64(tmp64);
9ee6e8bb 8396 if ((sh & 2) == 0) {
5e3f878a 8397 tmp2 = load_reg(s, rn);
9ef39277 8398 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8399 tcg_temp_free_i32(tmp2);
9ee6e8bb 8400 }
5e3f878a 8401 store_reg(s, rd, tmp);
9ee6e8bb
PB
8402 } else {
8403 /* 16 * 16 */
5e3f878a
PB
8404 tmp = load_reg(s, rm);
8405 tmp2 = load_reg(s, rs);
8406 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8407 tcg_temp_free_i32(tmp2);
9ee6e8bb 8408 if (op1 == 2) {
a7812ae4
PB
8409 tmp64 = tcg_temp_new_i64();
8410 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8411 tcg_temp_free_i32(tmp);
a7812ae4
PB
8412 gen_addq(s, tmp64, rn, rd);
8413 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8414 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8415 } else {
8416 if (op1 == 0) {
5e3f878a 8417 tmp2 = load_reg(s, rn);
9ef39277 8418 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8419 tcg_temp_free_i32(tmp2);
9ee6e8bb 8420 }
5e3f878a 8421 store_reg(s, rd, tmp);
9ee6e8bb
PB
8422 }
8423 }
8424 break;
8425 default:
8426 goto illegal_op;
8427 }
8428 } else if (((insn & 0x0e000000) == 0 &&
8429 (insn & 0x00000090) != 0x90) ||
8430 ((insn & 0x0e000000) == (1 << 25))) {
8431 int set_cc, logic_cc, shiftop;
8432
8433 op1 = (insn >> 21) & 0xf;
8434 set_cc = (insn >> 20) & 1;
8435 logic_cc = table_logic_cc[op1] & set_cc;
8436
8437 /* data processing instruction */
8438 if (insn & (1 << 25)) {
8439 /* immediate operand */
8440 val = insn & 0xff;
8441 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8442 if (shift) {
9ee6e8bb 8443 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8444 }
7d1b0095 8445 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8446 tcg_gen_movi_i32(tmp2, val);
8447 if (logic_cc && shift) {
8448 gen_set_CF_bit31(tmp2);
8449 }
9ee6e8bb
PB
8450 } else {
8451 /* register */
8452 rm = (insn) & 0xf;
e9bb4aa9 8453 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8454 shiftop = (insn >> 5) & 3;
8455 if (!(insn & (1 << 4))) {
8456 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8457 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8458 } else {
8459 rs = (insn >> 8) & 0xf;
8984bd2e 8460 tmp = load_reg(s, rs);
e9bb4aa9 8461 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8462 }
8463 }
8464 if (op1 != 0x0f && op1 != 0x0d) {
8465 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8466 tmp = load_reg(s, rn);
8467 } else {
39d5492a 8468 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8469 }
8470 rd = (insn >> 12) & 0xf;
8471 switch(op1) {
8472 case 0x00:
e9bb4aa9
JR
8473 tcg_gen_and_i32(tmp, tmp, tmp2);
8474 if (logic_cc) {
8475 gen_logic_CC(tmp);
8476 }
7dcc1f89 8477 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8478 break;
8479 case 0x01:
e9bb4aa9
JR
8480 tcg_gen_xor_i32(tmp, tmp, tmp2);
8481 if (logic_cc) {
8482 gen_logic_CC(tmp);
8483 }
7dcc1f89 8484 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8485 break;
8486 case 0x02:
8487 if (set_cc && rd == 15) {
8488 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8489 if (IS_USER(s)) {
9ee6e8bb 8490 goto illegal_op;
e9bb4aa9 8491 }
72485ec4 8492 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8493 gen_exception_return(s, tmp);
9ee6e8bb 8494 } else {
e9bb4aa9 8495 if (set_cc) {
72485ec4 8496 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8497 } else {
8498 tcg_gen_sub_i32(tmp, tmp, tmp2);
8499 }
7dcc1f89 8500 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8501 }
8502 break;
8503 case 0x03:
e9bb4aa9 8504 if (set_cc) {
72485ec4 8505 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8506 } else {
8507 tcg_gen_sub_i32(tmp, tmp2, tmp);
8508 }
7dcc1f89 8509 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8510 break;
8511 case 0x04:
e9bb4aa9 8512 if (set_cc) {
72485ec4 8513 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8514 } else {
8515 tcg_gen_add_i32(tmp, tmp, tmp2);
8516 }
7dcc1f89 8517 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8518 break;
8519 case 0x05:
e9bb4aa9 8520 if (set_cc) {
49b4c31e 8521 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8522 } else {
8523 gen_add_carry(tmp, tmp, tmp2);
8524 }
7dcc1f89 8525 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8526 break;
8527 case 0x06:
e9bb4aa9 8528 if (set_cc) {
2de68a49 8529 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8530 } else {
8531 gen_sub_carry(tmp, tmp, tmp2);
8532 }
7dcc1f89 8533 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8534 break;
8535 case 0x07:
e9bb4aa9 8536 if (set_cc) {
2de68a49 8537 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8538 } else {
8539 gen_sub_carry(tmp, tmp2, tmp);
8540 }
7dcc1f89 8541 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8542 break;
8543 case 0x08:
8544 if (set_cc) {
e9bb4aa9
JR
8545 tcg_gen_and_i32(tmp, tmp, tmp2);
8546 gen_logic_CC(tmp);
9ee6e8bb 8547 }
7d1b0095 8548 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8549 break;
8550 case 0x09:
8551 if (set_cc) {
e9bb4aa9
JR
8552 tcg_gen_xor_i32(tmp, tmp, tmp2);
8553 gen_logic_CC(tmp);
9ee6e8bb 8554 }
7d1b0095 8555 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8556 break;
8557 case 0x0a:
8558 if (set_cc) {
72485ec4 8559 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8560 }
7d1b0095 8561 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8562 break;
8563 case 0x0b:
8564 if (set_cc) {
72485ec4 8565 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8566 }
7d1b0095 8567 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8568 break;
8569 case 0x0c:
e9bb4aa9
JR
8570 tcg_gen_or_i32(tmp, tmp, tmp2);
8571 if (logic_cc) {
8572 gen_logic_CC(tmp);
8573 }
7dcc1f89 8574 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8575 break;
8576 case 0x0d:
8577 if (logic_cc && rd == 15) {
8578 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8579 if (IS_USER(s)) {
9ee6e8bb 8580 goto illegal_op;
e9bb4aa9
JR
8581 }
8582 gen_exception_return(s, tmp2);
9ee6e8bb 8583 } else {
e9bb4aa9
JR
8584 if (logic_cc) {
8585 gen_logic_CC(tmp2);
8586 }
7dcc1f89 8587 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8588 }
8589 break;
8590 case 0x0e:
f669df27 8591 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8592 if (logic_cc) {
8593 gen_logic_CC(tmp);
8594 }
7dcc1f89 8595 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8596 break;
8597 default:
8598 case 0x0f:
e9bb4aa9
JR
8599 tcg_gen_not_i32(tmp2, tmp2);
8600 if (logic_cc) {
8601 gen_logic_CC(tmp2);
8602 }
7dcc1f89 8603 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8604 break;
8605 }
e9bb4aa9 8606 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8607 tcg_temp_free_i32(tmp2);
e9bb4aa9 8608 }
9ee6e8bb
PB
8609 } else {
8610 /* other instructions */
8611 op1 = (insn >> 24) & 0xf;
8612 switch(op1) {
8613 case 0x0:
8614 case 0x1:
8615 /* multiplies, extra load/stores */
8616 sh = (insn >> 5) & 3;
8617 if (sh == 0) {
8618 if (op1 == 0x0) {
8619 rd = (insn >> 16) & 0xf;
8620 rn = (insn >> 12) & 0xf;
8621 rs = (insn >> 8) & 0xf;
8622 rm = (insn) & 0xf;
8623 op1 = (insn >> 20) & 0xf;
8624 switch (op1) {
8625 case 0: case 1: case 2: case 3: case 6:
8626 /* 32 bit mul */
5e3f878a
PB
8627 tmp = load_reg(s, rs);
8628 tmp2 = load_reg(s, rm);
8629 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8630 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8631 if (insn & (1 << 22)) {
8632 /* Subtract (mls) */
8633 ARCH(6T2);
5e3f878a
PB
8634 tmp2 = load_reg(s, rn);
8635 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8636 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8637 } else if (insn & (1 << 21)) {
8638 /* Add */
5e3f878a
PB
8639 tmp2 = load_reg(s, rn);
8640 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8641 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8642 }
8643 if (insn & (1 << 20))
5e3f878a
PB
8644 gen_logic_CC(tmp);
8645 store_reg(s, rd, tmp);
9ee6e8bb 8646 break;
8aac08b1
AJ
8647 case 4:
8648 /* 64 bit mul double accumulate (UMAAL) */
8649 ARCH(6);
8650 tmp = load_reg(s, rs);
8651 tmp2 = load_reg(s, rm);
8652 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8653 gen_addq_lo(s, tmp64, rn);
8654 gen_addq_lo(s, tmp64, rd);
8655 gen_storeq_reg(s, rn, rd, tmp64);
8656 tcg_temp_free_i64(tmp64);
8657 break;
8658 case 8: case 9: case 10: case 11:
8659 case 12: case 13: case 14: case 15:
8660 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8661 tmp = load_reg(s, rs);
8662 tmp2 = load_reg(s, rm);
8aac08b1 8663 if (insn & (1 << 22)) {
c9f10124 8664 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8665 } else {
c9f10124 8666 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8667 }
8668 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8669 TCGv_i32 al = load_reg(s, rn);
8670 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8671 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8672 tcg_temp_free_i32(al);
8673 tcg_temp_free_i32(ah);
9ee6e8bb 8674 }
8aac08b1 8675 if (insn & (1 << 20)) {
c9f10124 8676 gen_logicq_cc(tmp, tmp2);
8aac08b1 8677 }
c9f10124
RH
8678 store_reg(s, rn, tmp);
8679 store_reg(s, rd, tmp2);
9ee6e8bb 8680 break;
8aac08b1
AJ
8681 default:
8682 goto illegal_op;
9ee6e8bb
PB
8683 }
8684 } else {
8685 rn = (insn >> 16) & 0xf;
8686 rd = (insn >> 12) & 0xf;
8687 if (insn & (1 << 23)) {
8688 /* load/store exclusive */
2359bf80 8689 int op2 = (insn >> 8) & 3;
86753403 8690 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8691
8692 switch (op2) {
8693 case 0: /* lda/stl */
8694 if (op1 == 1) {
8695 goto illegal_op;
8696 }
8697 ARCH(8);
8698 break;
8699 case 1: /* reserved */
8700 goto illegal_op;
8701 case 2: /* ldaex/stlex */
8702 ARCH(8);
8703 break;
8704 case 3: /* ldrex/strex */
8705 if (op1) {
8706 ARCH(6K);
8707 } else {
8708 ARCH(6);
8709 }
8710 break;
8711 }
8712
3174f8e9 8713 addr = tcg_temp_local_new_i32();
98a46317 8714 load_reg_var(s, addr, rn);
2359bf80
MR
8715
8716 /* Since the emulation does not have barriers,
8717 the acquire/release semantics need no special
8718 handling */
8719 if (op2 == 0) {
8720 if (insn & (1 << 20)) {
8721 tmp = tcg_temp_new_i32();
8722 switch (op1) {
8723 case 0: /* lda */
12dcc321
PB
8724 gen_aa32_ld32u(s, tmp, addr,
8725 get_mem_index(s));
2359bf80
MR
8726 break;
8727 case 2: /* ldab */
12dcc321
PB
8728 gen_aa32_ld8u(s, tmp, addr,
8729 get_mem_index(s));
2359bf80
MR
8730 break;
8731 case 3: /* ldah */
12dcc321
PB
8732 gen_aa32_ld16u(s, tmp, addr,
8733 get_mem_index(s));
2359bf80
MR
8734 break;
8735 default:
8736 abort();
8737 }
8738 store_reg(s, rd, tmp);
8739 } else {
8740 rm = insn & 0xf;
8741 tmp = load_reg(s, rm);
8742 switch (op1) {
8743 case 0: /* stl */
12dcc321
PB
8744 gen_aa32_st32(s, tmp, addr,
8745 get_mem_index(s));
2359bf80
MR
8746 break;
8747 case 2: /* stlb */
12dcc321
PB
8748 gen_aa32_st8(s, tmp, addr,
8749 get_mem_index(s));
2359bf80
MR
8750 break;
8751 case 3: /* stlh */
12dcc321
PB
8752 gen_aa32_st16(s, tmp, addr,
8753 get_mem_index(s));
2359bf80
MR
8754 break;
8755 default:
8756 abort();
8757 }
8758 tcg_temp_free_i32(tmp);
8759 }
8760 } else if (insn & (1 << 20)) {
86753403
PB
8761 switch (op1) {
8762 case 0: /* ldrex */
426f5abc 8763 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8764 break;
8765 case 1: /* ldrexd */
426f5abc 8766 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8767 break;
8768 case 2: /* ldrexb */
426f5abc 8769 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8770 break;
8771 case 3: /* ldrexh */
426f5abc 8772 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8773 break;
8774 default:
8775 abort();
8776 }
9ee6e8bb
PB
8777 } else {
8778 rm = insn & 0xf;
86753403
PB
8779 switch (op1) {
8780 case 0: /* strex */
426f5abc 8781 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8782 break;
8783 case 1: /* strexd */
502e64fe 8784 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8785 break;
8786 case 2: /* strexb */
426f5abc 8787 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8788 break;
8789 case 3: /* strexh */
426f5abc 8790 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8791 break;
8792 default:
8793 abort();
8794 }
9ee6e8bb 8795 }
39d5492a 8796 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8797 } else {
8798 /* SWP instruction */
8799 rm = (insn) & 0xf;
8800
8984bd2e
PB
8801 /* ??? This is not really atomic. However we know
8802 we never have multiple CPUs running in parallel,
8803 so it is good enough. */
8804 addr = load_reg(s, rn);
8805 tmp = load_reg(s, rm);
5a839c0d 8806 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8807 if (insn & (1 << 22)) {
12dcc321
PB
8808 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8809 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8810 } else {
12dcc321
PB
8811 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8812 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8813 }
5a839c0d 8814 tcg_temp_free_i32(tmp);
7d1b0095 8815 tcg_temp_free_i32(addr);
8984bd2e 8816 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8817 }
8818 }
8819 } else {
8820 int address_offset;
3960c336
PM
8821 bool load = insn & (1 << 20);
8822 bool doubleword = false;
9ee6e8bb
PB
8823 /* Misc load/store */
8824 rn = (insn >> 16) & 0xf;
8825 rd = (insn >> 12) & 0xf;
3960c336
PM
8826
8827 if (!load && (sh & 2)) {
8828 /* doubleword */
8829 ARCH(5TE);
8830 if (rd & 1) {
8831 /* UNPREDICTABLE; we choose to UNDEF */
8832 goto illegal_op;
8833 }
8834 load = (sh & 1) == 0;
8835 doubleword = true;
8836 }
8837
b0109805 8838 addr = load_reg(s, rn);
9ee6e8bb 8839 if (insn & (1 << 24))
b0109805 8840 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8841 address_offset = 0;
3960c336
PM
8842
8843 if (doubleword) {
8844 if (!load) {
9ee6e8bb 8845 /* store */
b0109805 8846 tmp = load_reg(s, rd);
12dcc321 8847 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8848 tcg_temp_free_i32(tmp);
b0109805
PB
8849 tcg_gen_addi_i32(addr, addr, 4);
8850 tmp = load_reg(s, rd + 1);
12dcc321 8851 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8852 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8853 } else {
8854 /* load */
5a839c0d 8855 tmp = tcg_temp_new_i32();
12dcc321 8856 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8857 store_reg(s, rd, tmp);
8858 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8859 tmp = tcg_temp_new_i32();
12dcc321 8860 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8861 rd++;
9ee6e8bb
PB
8862 }
8863 address_offset = -4;
3960c336
PM
8864 } else if (load) {
8865 /* load */
8866 tmp = tcg_temp_new_i32();
8867 switch (sh) {
8868 case 1:
12dcc321 8869 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
3960c336
PM
8870 break;
8871 case 2:
12dcc321 8872 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8873 break;
8874 default:
8875 case 3:
12dcc321 8876 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8877 break;
8878 }
9ee6e8bb
PB
8879 } else {
8880 /* store */
b0109805 8881 tmp = load_reg(s, rd);
12dcc321 8882 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5a839c0d 8883 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8884 }
8885 /* Perform base writeback before the loaded value to
8886 ensure correct behavior with overlapping index registers.
b6af0975 8887 ldrd with base writeback is undefined if the
9ee6e8bb
PB
8888 destination and index registers overlap. */
8889 if (!(insn & (1 << 24))) {
b0109805
PB
8890 gen_add_datah_offset(s, insn, address_offset, addr);
8891 store_reg(s, rn, addr);
9ee6e8bb
PB
8892 } else if (insn & (1 << 21)) {
8893 if (address_offset)
b0109805
PB
8894 tcg_gen_addi_i32(addr, addr, address_offset);
8895 store_reg(s, rn, addr);
8896 } else {
7d1b0095 8897 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8898 }
8899 if (load) {
8900 /* Complete the load. */
b0109805 8901 store_reg(s, rd, tmp);
9ee6e8bb
PB
8902 }
8903 }
8904 break;
8905 case 0x4:
8906 case 0x5:
8907 goto do_ldst;
8908 case 0x6:
8909 case 0x7:
8910 if (insn & (1 << 4)) {
8911 ARCH(6);
8912 /* Armv6 Media instructions. */
8913 rm = insn & 0xf;
8914 rn = (insn >> 16) & 0xf;
2c0262af 8915 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8916 rs = (insn >> 8) & 0xf;
8917 switch ((insn >> 23) & 3) {
8918 case 0: /* Parallel add/subtract. */
8919 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8920 tmp = load_reg(s, rn);
8921 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8922 sh = (insn >> 5) & 7;
8923 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8924 goto illegal_op;
6ddbc6e4 8925 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8926 tcg_temp_free_i32(tmp2);
6ddbc6e4 8927 store_reg(s, rd, tmp);
9ee6e8bb
PB
8928 break;
8929 case 1:
8930 if ((insn & 0x00700020) == 0) {
6c95676b 8931 /* Halfword pack. */
3670669c
PB
8932 tmp = load_reg(s, rn);
8933 tmp2 = load_reg(s, rm);
9ee6e8bb 8934 shift = (insn >> 7) & 0x1f;
3670669c
PB
8935 if (insn & (1 << 6)) {
8936 /* pkhtb */
22478e79
AZ
8937 if (shift == 0)
8938 shift = 31;
8939 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8940 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8941 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8942 } else {
8943 /* pkhbt */
22478e79
AZ
8944 if (shift)
8945 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8946 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8947 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8948 }
8949 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8950 tcg_temp_free_i32(tmp2);
3670669c 8951 store_reg(s, rd, tmp);
9ee6e8bb
PB
8952 } else if ((insn & 0x00200020) == 0x00200000) {
8953 /* [us]sat */
6ddbc6e4 8954 tmp = load_reg(s, rm);
9ee6e8bb
PB
8955 shift = (insn >> 7) & 0x1f;
8956 if (insn & (1 << 6)) {
8957 if (shift == 0)
8958 shift = 31;
6ddbc6e4 8959 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8960 } else {
6ddbc6e4 8961 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8962 }
8963 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8964 tmp2 = tcg_const_i32(sh);
8965 if (insn & (1 << 22))
9ef39277 8966 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8967 else
9ef39277 8968 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8969 tcg_temp_free_i32(tmp2);
6ddbc6e4 8970 store_reg(s, rd, tmp);
9ee6e8bb
PB
8971 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8972 /* [us]sat16 */
6ddbc6e4 8973 tmp = load_reg(s, rm);
9ee6e8bb 8974 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8975 tmp2 = tcg_const_i32(sh);
8976 if (insn & (1 << 22))
9ef39277 8977 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8978 else
9ef39277 8979 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8980 tcg_temp_free_i32(tmp2);
6ddbc6e4 8981 store_reg(s, rd, tmp);
9ee6e8bb
PB
8982 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8983 /* Select bytes. */
6ddbc6e4
PB
8984 tmp = load_reg(s, rn);
8985 tmp2 = load_reg(s, rm);
7d1b0095 8986 tmp3 = tcg_temp_new_i32();
0ecb72a5 8987 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8988 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8989 tcg_temp_free_i32(tmp3);
8990 tcg_temp_free_i32(tmp2);
6ddbc6e4 8991 store_reg(s, rd, tmp);
9ee6e8bb 8992 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8993 tmp = load_reg(s, rm);
9ee6e8bb 8994 shift = (insn >> 10) & 3;
1301f322 8995 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8996 rotate, a shift is sufficient. */
8997 if (shift != 0)
f669df27 8998 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8999 op1 = (insn >> 20) & 7;
9000 switch (op1) {
5e3f878a
PB
9001 case 0: gen_sxtb16(tmp); break;
9002 case 2: gen_sxtb(tmp); break;
9003 case 3: gen_sxth(tmp); break;
9004 case 4: gen_uxtb16(tmp); break;
9005 case 6: gen_uxtb(tmp); break;
9006 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9007 default: goto illegal_op;
9008 }
9009 if (rn != 15) {
5e3f878a 9010 tmp2 = load_reg(s, rn);
9ee6e8bb 9011 if ((op1 & 3) == 0) {
5e3f878a 9012 gen_add16(tmp, tmp2);
9ee6e8bb 9013 } else {
5e3f878a 9014 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9015 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9016 }
9017 }
6c95676b 9018 store_reg(s, rd, tmp);
9ee6e8bb
PB
9019 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9020 /* rev */
b0109805 9021 tmp = load_reg(s, rm);
9ee6e8bb
PB
9022 if (insn & (1 << 22)) {
9023 if (insn & (1 << 7)) {
b0109805 9024 gen_revsh(tmp);
9ee6e8bb
PB
9025 } else {
9026 ARCH(6T2);
b0109805 9027 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9028 }
9029 } else {
9030 if (insn & (1 << 7))
b0109805 9031 gen_rev16(tmp);
9ee6e8bb 9032 else
66896cb8 9033 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9034 }
b0109805 9035 store_reg(s, rd, tmp);
9ee6e8bb
PB
9036 } else {
9037 goto illegal_op;
9038 }
9039 break;
9040 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9041 switch ((insn >> 20) & 0x7) {
9042 case 5:
9043 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9044 /* op2 not 00x or 11x : UNDEF */
9045 goto illegal_op;
9046 }
838fa72d
AJ
9047 /* Signed multiply most significant [accumulate].
9048 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9049 tmp = load_reg(s, rm);
9050 tmp2 = load_reg(s, rs);
a7812ae4 9051 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9052
955a7dd5 9053 if (rd != 15) {
838fa72d 9054 tmp = load_reg(s, rd);
9ee6e8bb 9055 if (insn & (1 << 6)) {
838fa72d 9056 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9057 } else {
838fa72d 9058 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9059 }
9060 }
838fa72d
AJ
9061 if (insn & (1 << 5)) {
9062 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9063 }
9064 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9065 tmp = tcg_temp_new_i32();
ecc7b3aa 9066 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9067 tcg_temp_free_i64(tmp64);
955a7dd5 9068 store_reg(s, rn, tmp);
41e9564d
PM
9069 break;
9070 case 0:
9071 case 4:
9072 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9073 if (insn & (1 << 7)) {
9074 goto illegal_op;
9075 }
9076 tmp = load_reg(s, rm);
9077 tmp2 = load_reg(s, rs);
9ee6e8bb 9078 if (insn & (1 << 5))
5e3f878a
PB
9079 gen_swap_half(tmp2);
9080 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9081 if (insn & (1 << 22)) {
5e3f878a 9082 /* smlald, smlsld */
33bbd75a
PC
9083 TCGv_i64 tmp64_2;
9084
a7812ae4 9085 tmp64 = tcg_temp_new_i64();
33bbd75a 9086 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9087 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9088 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9089 tcg_temp_free_i32(tmp);
33bbd75a
PC
9090 tcg_temp_free_i32(tmp2);
9091 if (insn & (1 << 6)) {
9092 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9093 } else {
9094 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9095 }
9096 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9097 gen_addq(s, tmp64, rd, rn);
9098 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9099 tcg_temp_free_i64(tmp64);
9ee6e8bb 9100 } else {
5e3f878a 9101 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9102 if (insn & (1 << 6)) {
9103 /* This subtraction cannot overflow. */
9104 tcg_gen_sub_i32(tmp, tmp, tmp2);
9105 } else {
9106 /* This addition cannot overflow 32 bits;
9107 * however it may overflow considered as a
9108 * signed operation, in which case we must set
9109 * the Q flag.
9110 */
9111 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9112 }
9113 tcg_temp_free_i32(tmp2);
22478e79 9114 if (rd != 15)
9ee6e8bb 9115 {
22478e79 9116 tmp2 = load_reg(s, rd);
9ef39277 9117 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9118 tcg_temp_free_i32(tmp2);
9ee6e8bb 9119 }
22478e79 9120 store_reg(s, rn, tmp);
9ee6e8bb 9121 }
41e9564d 9122 break;
b8b8ea05
PM
9123 case 1:
9124 case 3:
9125 /* SDIV, UDIV */
d614a513 9126 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9127 goto illegal_op;
9128 }
9129 if (((insn >> 5) & 7) || (rd != 15)) {
9130 goto illegal_op;
9131 }
9132 tmp = load_reg(s, rm);
9133 tmp2 = load_reg(s, rs);
9134 if (insn & (1 << 21)) {
9135 gen_helper_udiv(tmp, tmp, tmp2);
9136 } else {
9137 gen_helper_sdiv(tmp, tmp, tmp2);
9138 }
9139 tcg_temp_free_i32(tmp2);
9140 store_reg(s, rn, tmp);
9141 break;
41e9564d
PM
9142 default:
9143 goto illegal_op;
9ee6e8bb
PB
9144 }
9145 break;
9146 case 3:
9147 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9148 switch (op1) {
9149 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9150 ARCH(6);
9151 tmp = load_reg(s, rm);
9152 tmp2 = load_reg(s, rs);
9153 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9154 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9155 if (rd != 15) {
9156 tmp2 = load_reg(s, rd);
6ddbc6e4 9157 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9158 tcg_temp_free_i32(tmp2);
9ee6e8bb 9159 }
ded9d295 9160 store_reg(s, rn, tmp);
9ee6e8bb
PB
9161 break;
9162 case 0x20: case 0x24: case 0x28: case 0x2c:
9163 /* Bitfield insert/clear. */
9164 ARCH(6T2);
9165 shift = (insn >> 7) & 0x1f;
9166 i = (insn >> 16) & 0x1f;
45140a57
KB
9167 if (i < shift) {
9168 /* UNPREDICTABLE; we choose to UNDEF */
9169 goto illegal_op;
9170 }
9ee6e8bb
PB
9171 i = i + 1 - shift;
9172 if (rm == 15) {
7d1b0095 9173 tmp = tcg_temp_new_i32();
5e3f878a 9174 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9175 } else {
5e3f878a 9176 tmp = load_reg(s, rm);
9ee6e8bb
PB
9177 }
9178 if (i != 32) {
5e3f878a 9179 tmp2 = load_reg(s, rd);
d593c48e 9180 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9181 tcg_temp_free_i32(tmp2);
9ee6e8bb 9182 }
5e3f878a 9183 store_reg(s, rd, tmp);
9ee6e8bb
PB
9184 break;
9185 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9186 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9187 ARCH(6T2);
5e3f878a 9188 tmp = load_reg(s, rm);
9ee6e8bb
PB
9189 shift = (insn >> 7) & 0x1f;
9190 i = ((insn >> 16) & 0x1f) + 1;
9191 if (shift + i > 32)
9192 goto illegal_op;
9193 if (i < 32) {
9194 if (op1 & 0x20) {
5e3f878a 9195 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 9196 } else {
5e3f878a 9197 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
9198 }
9199 }
5e3f878a 9200 store_reg(s, rd, tmp);
9ee6e8bb
PB
9201 break;
9202 default:
9203 goto illegal_op;
9204 }
9205 break;
9206 }
9207 break;
9208 }
9209 do_ldst:
9210 /* Check for undefined extension instructions
9211 * per the ARM Bible IE:
9212 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9213 */
9214 sh = (0xf << 20) | (0xf << 4);
9215 if (op1 == 0x7 && ((insn & sh) == sh))
9216 {
9217 goto illegal_op;
9218 }
9219 /* load/store byte/word */
9220 rn = (insn >> 16) & 0xf;
9221 rd = (insn >> 12) & 0xf;
b0109805 9222 tmp2 = load_reg(s, rn);
a99caa48
PM
9223 if ((insn & 0x01200000) == 0x00200000) {
9224 /* ldrt/strt */
579d21cc 9225 i = get_a32_user_mem_index(s);
a99caa48
PM
9226 } else {
9227 i = get_mem_index(s);
9228 }
9ee6e8bb 9229 if (insn & (1 << 24))
b0109805 9230 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9231 if (insn & (1 << 20)) {
9232 /* load */
5a839c0d 9233 tmp = tcg_temp_new_i32();
9ee6e8bb 9234 if (insn & (1 << 22)) {
12dcc321 9235 gen_aa32_ld8u(s, tmp, tmp2, i);
9ee6e8bb 9236 } else {
12dcc321 9237 gen_aa32_ld32u(s, tmp, tmp2, i);
9ee6e8bb 9238 }
9ee6e8bb
PB
9239 } else {
9240 /* store */
b0109805 9241 tmp = load_reg(s, rd);
5a839c0d 9242 if (insn & (1 << 22)) {
12dcc321 9243 gen_aa32_st8(s, tmp, tmp2, i);
5a839c0d 9244 } else {
12dcc321 9245 gen_aa32_st32(s, tmp, tmp2, i);
5a839c0d
PM
9246 }
9247 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9248 }
9249 if (!(insn & (1 << 24))) {
b0109805
PB
9250 gen_add_data_offset(s, insn, tmp2);
9251 store_reg(s, rn, tmp2);
9252 } else if (insn & (1 << 21)) {
9253 store_reg(s, rn, tmp2);
9254 } else {
7d1b0095 9255 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9256 }
9257 if (insn & (1 << 20)) {
9258 /* Complete the load. */
7dcc1f89 9259 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9260 }
9261 break;
9262 case 0x08:
9263 case 0x09:
9264 {
da3e53dd
PM
9265 int j, n, loaded_base;
9266 bool exc_return = false;
9267 bool is_load = extract32(insn, 20, 1);
9268 bool user = false;
39d5492a 9269 TCGv_i32 loaded_var;
9ee6e8bb
PB
9270 /* load/store multiple words */
9271 /* XXX: store correct base if write back */
9ee6e8bb 9272 if (insn & (1 << 22)) {
da3e53dd 9273 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9274 if (IS_USER(s))
9275 goto illegal_op; /* only usable in supervisor mode */
9276
da3e53dd
PM
9277 if (is_load && extract32(insn, 15, 1)) {
9278 exc_return = true;
9279 } else {
9280 user = true;
9281 }
9ee6e8bb
PB
9282 }
9283 rn = (insn >> 16) & 0xf;
b0109805 9284 addr = load_reg(s, rn);
9ee6e8bb
PB
9285
9286 /* compute total size */
9287 loaded_base = 0;
39d5492a 9288 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9289 n = 0;
9290 for(i=0;i<16;i++) {
9291 if (insn & (1 << i))
9292 n++;
9293 }
9294 /* XXX: test invalid n == 0 case ? */
9295 if (insn & (1 << 23)) {
9296 if (insn & (1 << 24)) {
9297 /* pre increment */
b0109805 9298 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9299 } else {
9300 /* post increment */
9301 }
9302 } else {
9303 if (insn & (1 << 24)) {
9304 /* pre decrement */
b0109805 9305 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9306 } else {
9307 /* post decrement */
9308 if (n != 1)
b0109805 9309 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9310 }
9311 }
9312 j = 0;
9313 for(i=0;i<16;i++) {
9314 if (insn & (1 << i)) {
da3e53dd 9315 if (is_load) {
9ee6e8bb 9316 /* load */
5a839c0d 9317 tmp = tcg_temp_new_i32();
12dcc321 9318 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9319 if (user) {
b75263d6 9320 tmp2 = tcg_const_i32(i);
1ce94f81 9321 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9322 tcg_temp_free_i32(tmp2);
7d1b0095 9323 tcg_temp_free_i32(tmp);
9ee6e8bb 9324 } else if (i == rn) {
b0109805 9325 loaded_var = tmp;
9ee6e8bb
PB
9326 loaded_base = 1;
9327 } else {
7dcc1f89 9328 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9329 }
9330 } else {
9331 /* store */
9332 if (i == 15) {
9333 /* special case: r15 = PC + 8 */
9334 val = (long)s->pc + 4;
7d1b0095 9335 tmp = tcg_temp_new_i32();
b0109805 9336 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9337 } else if (user) {
7d1b0095 9338 tmp = tcg_temp_new_i32();
b75263d6 9339 tmp2 = tcg_const_i32(i);
9ef39277 9340 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9341 tcg_temp_free_i32(tmp2);
9ee6e8bb 9342 } else {
b0109805 9343 tmp = load_reg(s, i);
9ee6e8bb 9344 }
12dcc321 9345 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9346 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9347 }
9348 j++;
9349 /* no need to add after the last transfer */
9350 if (j != n)
b0109805 9351 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9352 }
9353 }
9354 if (insn & (1 << 21)) {
9355 /* write back */
9356 if (insn & (1 << 23)) {
9357 if (insn & (1 << 24)) {
9358 /* pre increment */
9359 } else {
9360 /* post increment */
b0109805 9361 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9362 }
9363 } else {
9364 if (insn & (1 << 24)) {
9365 /* pre decrement */
9366 if (n != 1)
b0109805 9367 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9368 } else {
9369 /* post decrement */
b0109805 9370 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9371 }
9372 }
b0109805
PB
9373 store_reg(s, rn, addr);
9374 } else {
7d1b0095 9375 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9376 }
9377 if (loaded_base) {
b0109805 9378 store_reg(s, rn, loaded_var);
9ee6e8bb 9379 }
da3e53dd 9380 if (exc_return) {
9ee6e8bb 9381 /* Restore CPSR from SPSR. */
d9ba4830 9382 tmp = load_cpu_field(spsr);
235ea1f5 9383 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9384 tcg_temp_free_i32(tmp);
577bf808 9385 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9386 }
9387 }
9388 break;
9389 case 0xa:
9390 case 0xb:
9391 {
9392 int32_t offset;
9393
9394 /* branch (and link) */
9395 val = (int32_t)s->pc;
9396 if (insn & (1 << 24)) {
7d1b0095 9397 tmp = tcg_temp_new_i32();
5e3f878a
PB
9398 tcg_gen_movi_i32(tmp, val);
9399 store_reg(s, 14, tmp);
9ee6e8bb 9400 }
534df156
PM
9401 offset = sextract32(insn << 2, 0, 26);
9402 val += offset + 4;
9ee6e8bb
PB
9403 gen_jmp(s, val);
9404 }
9405 break;
9406 case 0xc:
9407 case 0xd:
9408 case 0xe:
6a57f3eb
WN
9409 if (((insn >> 8) & 0xe) == 10) {
9410 /* VFP. */
7dcc1f89 9411 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9412 goto illegal_op;
9413 }
7dcc1f89 9414 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9415 /* Coprocessor. */
9ee6e8bb 9416 goto illegal_op;
6a57f3eb 9417 }
9ee6e8bb
PB
9418 break;
9419 case 0xf:
9420 /* swi */
eaed129d 9421 gen_set_pc_im(s, s->pc);
d4a2dc67 9422 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9423 s->is_jmp = DISAS_SWI;
9424 break;
9425 default:
9426 illegal_op:
73710361
GB
9427 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9428 default_exception_el(s));
9ee6e8bb
PB
9429 break;
9430 }
9431 }
9432}
9433
/* Return nonzero if OP is a Thumb-2 logical data-processing opcode.
 * The logical group (AND, BIC, ORR, ORN, EOR, ...) occupies opcode
 * values 0..7; arithmetic ops start at 8.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
9440
9441/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9442 then set condition code flags based on the result of the operation.
9443 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9444 to the high bit of T1.
9445 Returns zero if the opcode is valid. */
9446
9447static int
39d5492a
PM
9448gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9449 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9450{
9451 int logic_cc;
9452
9453 logic_cc = 0;
9454 switch (op) {
9455 case 0: /* and */
396e467c 9456 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9457 logic_cc = conds;
9458 break;
9459 case 1: /* bic */
f669df27 9460 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9461 logic_cc = conds;
9462 break;
9463 case 2: /* orr */
396e467c 9464 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9465 logic_cc = conds;
9466 break;
9467 case 3: /* orn */
29501f1b 9468 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9469 logic_cc = conds;
9470 break;
9471 case 4: /* eor */
396e467c 9472 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9473 logic_cc = conds;
9474 break;
9475 case 8: /* add */
9476 if (conds)
72485ec4 9477 gen_add_CC(t0, t0, t1);
9ee6e8bb 9478 else
396e467c 9479 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9480 break;
9481 case 10: /* adc */
9482 if (conds)
49b4c31e 9483 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9484 else
396e467c 9485 gen_adc(t0, t1);
9ee6e8bb
PB
9486 break;
9487 case 11: /* sbc */
2de68a49
RH
9488 if (conds) {
9489 gen_sbc_CC(t0, t0, t1);
9490 } else {
396e467c 9491 gen_sub_carry(t0, t0, t1);
2de68a49 9492 }
9ee6e8bb
PB
9493 break;
9494 case 13: /* sub */
9495 if (conds)
72485ec4 9496 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9497 else
396e467c 9498 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9499 break;
9500 case 14: /* rsb */
9501 if (conds)
72485ec4 9502 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9503 else
396e467c 9504 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9505 break;
9506 default: /* 5, 6, 7, 9, 12, 15. */
9507 return 1;
9508 }
9509 if (logic_cc) {
396e467c 9510 gen_logic_CC(t0);
9ee6e8bb 9511 if (shifter_out)
396e467c 9512 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9513 }
9514 return 0;
9515}
9516
9517/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9518 is not legal. */
0ecb72a5 9519static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9520{
b0109805 9521 uint32_t insn, imm, shift, offset;
9ee6e8bb 9522 uint32_t rd, rn, rm, rs;
39d5492a
PM
9523 TCGv_i32 tmp;
9524 TCGv_i32 tmp2;
9525 TCGv_i32 tmp3;
9526 TCGv_i32 addr;
a7812ae4 9527 TCGv_i64 tmp64;
9ee6e8bb
PB
9528 int op;
9529 int shiftop;
9530 int conds;
9531 int logic_cc;
9532
d614a513
PM
9533 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9534 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9535 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9536 16-bit instructions to get correct prefetch abort behavior. */
9537 insn = insn_hw1;
9538 if ((insn & (1 << 12)) == 0) {
be5e7a76 9539 ARCH(5);
9ee6e8bb
PB
9540 /* Second half of blx. */
9541 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9542 tmp = load_reg(s, 14);
9543 tcg_gen_addi_i32(tmp, tmp, offset);
9544 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9545
7d1b0095 9546 tmp2 = tcg_temp_new_i32();
b0109805 9547 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9548 store_reg(s, 14, tmp2);
9549 gen_bx(s, tmp);
9ee6e8bb
PB
9550 return 0;
9551 }
9552 if (insn & (1 << 11)) {
9553 /* Second half of bl. */
9554 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9555 tmp = load_reg(s, 14);
6a0d8a1d 9556 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9557
7d1b0095 9558 tmp2 = tcg_temp_new_i32();
b0109805 9559 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9560 store_reg(s, 14, tmp2);
9561 gen_bx(s, tmp);
9ee6e8bb
PB
9562 return 0;
9563 }
9564 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9565 /* Instruction spans a page boundary. Implement it as two
9566 16-bit instructions in case the second half causes an
9567 prefetch abort. */
9568 offset = ((int32_t)insn << 21) >> 9;
396e467c 9569 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9570 return 0;
9571 }
9572 /* Fall through to 32-bit decode. */
9573 }
9574
f9fd40eb 9575 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9576 s->pc += 2;
9577 insn |= (uint32_t)insn_hw1 << 16;
9578
9579 if ((insn & 0xf800e800) != 0xf000e800) {
9580 ARCH(6T2);
9581 }
9582
9583 rn = (insn >> 16) & 0xf;
9584 rs = (insn >> 12) & 0xf;
9585 rd = (insn >> 8) & 0xf;
9586 rm = insn & 0xf;
9587 switch ((insn >> 25) & 0xf) {
9588 case 0: case 1: case 2: case 3:
9589 /* 16-bit instructions. Should never happen. */
9590 abort();
9591 case 4:
9592 if (insn & (1 << 22)) {
9593 /* Other load/store, table branch. */
9594 if (insn & 0x01200000) {
9595 /* Load/store doubleword. */
9596 if (rn == 15) {
7d1b0095 9597 addr = tcg_temp_new_i32();
b0109805 9598 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9599 } else {
b0109805 9600 addr = load_reg(s, rn);
9ee6e8bb
PB
9601 }
9602 offset = (insn & 0xff) * 4;
9603 if ((insn & (1 << 23)) == 0)
9604 offset = -offset;
9605 if (insn & (1 << 24)) {
b0109805 9606 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9607 offset = 0;
9608 }
9609 if (insn & (1 << 20)) {
9610 /* ldrd */
e2592fad 9611 tmp = tcg_temp_new_i32();
12dcc321 9612 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9613 store_reg(s, rs, tmp);
9614 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9615 tmp = tcg_temp_new_i32();
12dcc321 9616 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9617 store_reg(s, rd, tmp);
9ee6e8bb
PB
9618 } else {
9619 /* strd */
b0109805 9620 tmp = load_reg(s, rs);
12dcc321 9621 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9622 tcg_temp_free_i32(tmp);
b0109805
PB
9623 tcg_gen_addi_i32(addr, addr, 4);
9624 tmp = load_reg(s, rd);
12dcc321 9625 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9626 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9627 }
9628 if (insn & (1 << 21)) {
9629 /* Base writeback. */
9630 if (rn == 15)
9631 goto illegal_op;
b0109805
PB
9632 tcg_gen_addi_i32(addr, addr, offset - 4);
9633 store_reg(s, rn, addr);
9634 } else {
7d1b0095 9635 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9636 }
9637 } else if ((insn & (1 << 23)) == 0) {
9638 /* Load/store exclusive word. */
39d5492a 9639 addr = tcg_temp_local_new_i32();
98a46317 9640 load_reg_var(s, addr, rn);
426f5abc 9641 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9642 if (insn & (1 << 20)) {
426f5abc 9643 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9644 } else {
426f5abc 9645 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9646 }
39d5492a 9647 tcg_temp_free_i32(addr);
2359bf80 9648 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9649 /* Table Branch. */
9650 if (rn == 15) {
7d1b0095 9651 addr = tcg_temp_new_i32();
b0109805 9652 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9653 } else {
b0109805 9654 addr = load_reg(s, rn);
9ee6e8bb 9655 }
b26eefb6 9656 tmp = load_reg(s, rm);
b0109805 9657 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9658 if (insn & (1 << 4)) {
9659 /* tbh */
b0109805 9660 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9661 tcg_temp_free_i32(tmp);
e2592fad 9662 tmp = tcg_temp_new_i32();
12dcc321 9663 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9664 } else { /* tbb */
7d1b0095 9665 tcg_temp_free_i32(tmp);
e2592fad 9666 tmp = tcg_temp_new_i32();
12dcc321 9667 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9668 }
7d1b0095 9669 tcg_temp_free_i32(addr);
b0109805
PB
9670 tcg_gen_shli_i32(tmp, tmp, 1);
9671 tcg_gen_addi_i32(tmp, tmp, s->pc);
9672 store_reg(s, 15, tmp);
9ee6e8bb 9673 } else {
2359bf80 9674 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9675 op = (insn >> 4) & 0x3;
2359bf80
MR
9676 switch (op2) {
9677 case 0:
426f5abc 9678 goto illegal_op;
2359bf80
MR
9679 case 1:
9680 /* Load/store exclusive byte/halfword/doubleword */
9681 if (op == 2) {
9682 goto illegal_op;
9683 }
9684 ARCH(7);
9685 break;
9686 case 2:
9687 /* Load-acquire/store-release */
9688 if (op == 3) {
9689 goto illegal_op;
9690 }
9691 /* Fall through */
9692 case 3:
9693 /* Load-acquire/store-release exclusive */
9694 ARCH(8);
9695 break;
426f5abc 9696 }
39d5492a 9697 addr = tcg_temp_local_new_i32();
98a46317 9698 load_reg_var(s, addr, rn);
2359bf80
MR
9699 if (!(op2 & 1)) {
9700 if (insn & (1 << 20)) {
9701 tmp = tcg_temp_new_i32();
9702 switch (op) {
9703 case 0: /* ldab */
12dcc321 9704 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9705 break;
9706 case 1: /* ldah */
12dcc321 9707 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9708 break;
9709 case 2: /* lda */
12dcc321 9710 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9711 break;
9712 default:
9713 abort();
9714 }
9715 store_reg(s, rs, tmp);
9716 } else {
9717 tmp = load_reg(s, rs);
9718 switch (op) {
9719 case 0: /* stlb */
12dcc321 9720 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9721 break;
9722 case 1: /* stlh */
12dcc321 9723 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9724 break;
9725 case 2: /* stl */
12dcc321 9726 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9727 break;
9728 default:
9729 abort();
9730 }
9731 tcg_temp_free_i32(tmp);
9732 }
9733 } else if (insn & (1 << 20)) {
426f5abc 9734 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9735 } else {
426f5abc 9736 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9737 }
39d5492a 9738 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9739 }
9740 } else {
9741 /* Load/store multiple, RFE, SRS. */
9742 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9743 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9744 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9745 goto illegal_op;
00115976 9746 }
9ee6e8bb
PB
9747 if (insn & (1 << 20)) {
9748 /* rfe */
b0109805
PB
9749 addr = load_reg(s, rn);
9750 if ((insn & (1 << 24)) == 0)
9751 tcg_gen_addi_i32(addr, addr, -8);
9752 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9753 tmp = tcg_temp_new_i32();
12dcc321 9754 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9755 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9756 tmp2 = tcg_temp_new_i32();
12dcc321 9757 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9758 if (insn & (1 << 21)) {
9759 /* Base writeback. */
b0109805
PB
9760 if (insn & (1 << 24)) {
9761 tcg_gen_addi_i32(addr, addr, 4);
9762 } else {
9763 tcg_gen_addi_i32(addr, addr, -4);
9764 }
9765 store_reg(s, rn, addr);
9766 } else {
7d1b0095 9767 tcg_temp_free_i32(addr);
9ee6e8bb 9768 }
b0109805 9769 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9770 } else {
9771 /* srs */
81465888
PM
9772 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9773 insn & (1 << 21));
9ee6e8bb
PB
9774 }
9775 } else {
5856d44e 9776 int i, loaded_base = 0;
39d5492a 9777 TCGv_i32 loaded_var;
9ee6e8bb 9778 /* Load/store multiple. */
b0109805 9779 addr = load_reg(s, rn);
9ee6e8bb
PB
9780 offset = 0;
9781 for (i = 0; i < 16; i++) {
9782 if (insn & (1 << i))
9783 offset += 4;
9784 }
9785 if (insn & (1 << 24)) {
b0109805 9786 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9787 }
9788
39d5492a 9789 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9790 for (i = 0; i < 16; i++) {
9791 if ((insn & (1 << i)) == 0)
9792 continue;
9793 if (insn & (1 << 20)) {
9794 /* Load. */
e2592fad 9795 tmp = tcg_temp_new_i32();
12dcc321 9796 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9797 if (i == 15) {
b0109805 9798 gen_bx(s, tmp);
5856d44e
YO
9799 } else if (i == rn) {
9800 loaded_var = tmp;
9801 loaded_base = 1;
9ee6e8bb 9802 } else {
b0109805 9803 store_reg(s, i, tmp);
9ee6e8bb
PB
9804 }
9805 } else {
9806 /* Store. */
b0109805 9807 tmp = load_reg(s, i);
12dcc321 9808 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9809 tcg_temp_free_i32(tmp);
9ee6e8bb 9810 }
b0109805 9811 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9812 }
5856d44e
YO
9813 if (loaded_base) {
9814 store_reg(s, rn, loaded_var);
9815 }
9ee6e8bb
PB
9816 if (insn & (1 << 21)) {
9817 /* Base register writeback. */
9818 if (insn & (1 << 24)) {
b0109805 9819 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9820 }
9821 /* Fault if writeback register is in register list. */
9822 if (insn & (1 << rn))
9823 goto illegal_op;
b0109805
PB
9824 store_reg(s, rn, addr);
9825 } else {
7d1b0095 9826 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9827 }
9828 }
9829 }
9830 break;
2af9ab77
JB
9831 case 5:
9832
9ee6e8bb 9833 op = (insn >> 21) & 0xf;
2af9ab77 9834 if (op == 6) {
62b44f05
AR
9835 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9836 goto illegal_op;
9837 }
2af9ab77
JB
9838 /* Halfword pack. */
9839 tmp = load_reg(s, rn);
9840 tmp2 = load_reg(s, rm);
9841 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9842 if (insn & (1 << 5)) {
9843 /* pkhtb */
9844 if (shift == 0)
9845 shift = 31;
9846 tcg_gen_sari_i32(tmp2, tmp2, shift);
9847 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9848 tcg_gen_ext16u_i32(tmp2, tmp2);
9849 } else {
9850 /* pkhbt */
9851 if (shift)
9852 tcg_gen_shli_i32(tmp2, tmp2, shift);
9853 tcg_gen_ext16u_i32(tmp, tmp);
9854 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9855 }
9856 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9857 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9858 store_reg(s, rd, tmp);
9859 } else {
2af9ab77
JB
9860 /* Data processing register constant shift. */
9861 if (rn == 15) {
7d1b0095 9862 tmp = tcg_temp_new_i32();
2af9ab77
JB
9863 tcg_gen_movi_i32(tmp, 0);
9864 } else {
9865 tmp = load_reg(s, rn);
9866 }
9867 tmp2 = load_reg(s, rm);
9868
9869 shiftop = (insn >> 4) & 3;
9870 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9871 conds = (insn & (1 << 20)) != 0;
9872 logic_cc = (conds && thumb2_logic_op(op));
9873 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9874 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9875 goto illegal_op;
7d1b0095 9876 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9877 if (rd != 15) {
9878 store_reg(s, rd, tmp);
9879 } else {
7d1b0095 9880 tcg_temp_free_i32(tmp);
2af9ab77 9881 }
3174f8e9 9882 }
9ee6e8bb
PB
9883 break;
9884 case 13: /* Misc data processing. */
9885 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9886 if (op < 4 && (insn & 0xf000) != 0xf000)
9887 goto illegal_op;
9888 switch (op) {
9889 case 0: /* Register controlled shift. */
8984bd2e
PB
9890 tmp = load_reg(s, rn);
9891 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9892 if ((insn & 0x70) != 0)
9893 goto illegal_op;
9894 op = (insn >> 21) & 3;
8984bd2e
PB
9895 logic_cc = (insn & (1 << 20)) != 0;
9896 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9897 if (logic_cc)
9898 gen_logic_CC(tmp);
7dcc1f89 9899 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9900 break;
9901 case 1: /* Sign/zero extend. */
62b44f05
AR
9902 op = (insn >> 20) & 7;
9903 switch (op) {
9904 case 0: /* SXTAH, SXTH */
9905 case 1: /* UXTAH, UXTH */
9906 case 4: /* SXTAB, SXTB */
9907 case 5: /* UXTAB, UXTB */
9908 break;
9909 case 2: /* SXTAB16, SXTB16 */
9910 case 3: /* UXTAB16, UXTB16 */
9911 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9912 goto illegal_op;
9913 }
9914 break;
9915 default:
9916 goto illegal_op;
9917 }
9918 if (rn != 15) {
9919 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9920 goto illegal_op;
9921 }
9922 }
5e3f878a 9923 tmp = load_reg(s, rm);
9ee6e8bb 9924 shift = (insn >> 4) & 3;
1301f322 9925 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9926 rotate, a shift is sufficient. */
9927 if (shift != 0)
f669df27 9928 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9929 op = (insn >> 20) & 7;
9930 switch (op) {
5e3f878a
PB
9931 case 0: gen_sxth(tmp); break;
9932 case 1: gen_uxth(tmp); break;
9933 case 2: gen_sxtb16(tmp); break;
9934 case 3: gen_uxtb16(tmp); break;
9935 case 4: gen_sxtb(tmp); break;
9936 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9937 default:
9938 g_assert_not_reached();
9ee6e8bb
PB
9939 }
9940 if (rn != 15) {
5e3f878a 9941 tmp2 = load_reg(s, rn);
9ee6e8bb 9942 if ((op >> 1) == 1) {
5e3f878a 9943 gen_add16(tmp, tmp2);
9ee6e8bb 9944 } else {
5e3f878a 9945 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9946 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9947 }
9948 }
5e3f878a 9949 store_reg(s, rd, tmp);
9ee6e8bb
PB
9950 break;
9951 case 2: /* SIMD add/subtract. */
62b44f05
AR
9952 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9953 goto illegal_op;
9954 }
9ee6e8bb
PB
9955 op = (insn >> 20) & 7;
9956 shift = (insn >> 4) & 7;
9957 if ((op & 3) == 3 || (shift & 3) == 3)
9958 goto illegal_op;
6ddbc6e4
PB
9959 tmp = load_reg(s, rn);
9960 tmp2 = load_reg(s, rm);
9961 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9962 tcg_temp_free_i32(tmp2);
6ddbc6e4 9963 store_reg(s, rd, tmp);
9ee6e8bb
PB
9964 break;
9965 case 3: /* Other data processing. */
9966 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9967 if (op < 4) {
9968 /* Saturating add/subtract. */
62b44f05
AR
9969 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9970 goto illegal_op;
9971 }
d9ba4830
PB
9972 tmp = load_reg(s, rn);
9973 tmp2 = load_reg(s, rm);
9ee6e8bb 9974 if (op & 1)
9ef39277 9975 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9976 if (op & 2)
9ef39277 9977 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9978 else
9ef39277 9979 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9980 tcg_temp_free_i32(tmp2);
9ee6e8bb 9981 } else {
62b44f05
AR
9982 switch (op) {
9983 case 0x0a: /* rbit */
9984 case 0x08: /* rev */
9985 case 0x09: /* rev16 */
9986 case 0x0b: /* revsh */
9987 case 0x18: /* clz */
9988 break;
9989 case 0x10: /* sel */
9990 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9991 goto illegal_op;
9992 }
9993 break;
9994 case 0x20: /* crc32/crc32c */
9995 case 0x21:
9996 case 0x22:
9997 case 0x28:
9998 case 0x29:
9999 case 0x2a:
10000 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10001 goto illegal_op;
10002 }
10003 break;
10004 default:
10005 goto illegal_op;
10006 }
d9ba4830 10007 tmp = load_reg(s, rn);
9ee6e8bb
PB
10008 switch (op) {
10009 case 0x0a: /* rbit */
d9ba4830 10010 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10011 break;
10012 case 0x08: /* rev */
66896cb8 10013 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10014 break;
10015 case 0x09: /* rev16 */
d9ba4830 10016 gen_rev16(tmp);
9ee6e8bb
PB
10017 break;
10018 case 0x0b: /* revsh */
d9ba4830 10019 gen_revsh(tmp);
9ee6e8bb
PB
10020 break;
10021 case 0x10: /* sel */
d9ba4830 10022 tmp2 = load_reg(s, rm);
7d1b0095 10023 tmp3 = tcg_temp_new_i32();
0ecb72a5 10024 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10025 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10026 tcg_temp_free_i32(tmp3);
10027 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10028 break;
10029 case 0x18: /* clz */
d9ba4830 10030 gen_helper_clz(tmp, tmp);
9ee6e8bb 10031 break;
eb0ecd5a
WN
10032 case 0x20:
10033 case 0x21:
10034 case 0x22:
10035 case 0x28:
10036 case 0x29:
10037 case 0x2a:
10038 {
10039 /* crc32/crc32c */
10040 uint32_t sz = op & 0x3;
10041 uint32_t c = op & 0x8;
10042
eb0ecd5a 10043 tmp2 = load_reg(s, rm);
aa633469
PM
10044 if (sz == 0) {
10045 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10046 } else if (sz == 1) {
10047 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10048 }
eb0ecd5a
WN
10049 tmp3 = tcg_const_i32(1 << sz);
10050 if (c) {
10051 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10052 } else {
10053 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10054 }
10055 tcg_temp_free_i32(tmp2);
10056 tcg_temp_free_i32(tmp3);
10057 break;
10058 }
9ee6e8bb 10059 default:
62b44f05 10060 g_assert_not_reached();
9ee6e8bb
PB
10061 }
10062 }
d9ba4830 10063 store_reg(s, rd, tmp);
9ee6e8bb
PB
10064 break;
10065 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10066 switch ((insn >> 20) & 7) {
10067 case 0: /* 32 x 32 -> 32 */
10068 case 7: /* Unsigned sum of absolute differences. */
10069 break;
10070 case 1: /* 16 x 16 -> 32 */
10071 case 2: /* Dual multiply add. */
10072 case 3: /* 32 * 16 -> 32msb */
10073 case 4: /* Dual multiply subtract. */
10074 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10075 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10076 goto illegal_op;
10077 }
10078 break;
10079 }
9ee6e8bb 10080 op = (insn >> 4) & 0xf;
d9ba4830
PB
10081 tmp = load_reg(s, rn);
10082 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10083 switch ((insn >> 20) & 7) {
10084 case 0: /* 32 x 32 -> 32 */
d9ba4830 10085 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10086 tcg_temp_free_i32(tmp2);
9ee6e8bb 10087 if (rs != 15) {
d9ba4830 10088 tmp2 = load_reg(s, rs);
9ee6e8bb 10089 if (op)
d9ba4830 10090 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10091 else
d9ba4830 10092 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10093 tcg_temp_free_i32(tmp2);
9ee6e8bb 10094 }
9ee6e8bb
PB
10095 break;
10096 case 1: /* 16 x 16 -> 32 */
d9ba4830 10097 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10098 tcg_temp_free_i32(tmp2);
9ee6e8bb 10099 if (rs != 15) {
d9ba4830 10100 tmp2 = load_reg(s, rs);
9ef39277 10101 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10102 tcg_temp_free_i32(tmp2);
9ee6e8bb 10103 }
9ee6e8bb
PB
10104 break;
10105 case 2: /* Dual multiply add. */
10106 case 4: /* Dual multiply subtract. */
10107 if (op)
d9ba4830
PB
10108 gen_swap_half(tmp2);
10109 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10110 if (insn & (1 << 22)) {
e1d177b9 10111 /* This subtraction cannot overflow. */
d9ba4830 10112 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10113 } else {
e1d177b9
PM
10114 /* This addition cannot overflow 32 bits;
10115 * however it may overflow considered as a signed
10116 * operation, in which case we must set the Q flag.
10117 */
9ef39277 10118 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10119 }
7d1b0095 10120 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10121 if (rs != 15)
10122 {
d9ba4830 10123 tmp2 = load_reg(s, rs);
9ef39277 10124 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10125 tcg_temp_free_i32(tmp2);
9ee6e8bb 10126 }
9ee6e8bb
PB
10127 break;
10128 case 3: /* 32 * 16 -> 32msb */
10129 if (op)
d9ba4830 10130 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10131 else
d9ba4830 10132 gen_sxth(tmp2);
a7812ae4
PB
10133 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10134 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10135 tmp = tcg_temp_new_i32();
ecc7b3aa 10136 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10137 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10138 if (rs != 15)
10139 {
d9ba4830 10140 tmp2 = load_reg(s, rs);
9ef39277 10141 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10142 tcg_temp_free_i32(tmp2);
9ee6e8bb 10143 }
9ee6e8bb 10144 break;
838fa72d
AJ
10145 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10146 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10147 if (rs != 15) {
838fa72d
AJ
10148 tmp = load_reg(s, rs);
10149 if (insn & (1 << 20)) {
10150 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10151 } else {
838fa72d 10152 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10153 }
2c0262af 10154 }
838fa72d
AJ
10155 if (insn & (1 << 4)) {
10156 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10157 }
10158 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10159 tmp = tcg_temp_new_i32();
ecc7b3aa 10160 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10161 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10162 break;
10163 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10164 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10165 tcg_temp_free_i32(tmp2);
9ee6e8bb 10166 if (rs != 15) {
d9ba4830
PB
10167 tmp2 = load_reg(s, rs);
10168 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10169 tcg_temp_free_i32(tmp2);
5fd46862 10170 }
9ee6e8bb 10171 break;
2c0262af 10172 }
d9ba4830 10173 store_reg(s, rd, tmp);
2c0262af 10174 break;
9ee6e8bb
PB
10175 case 6: case 7: /* 64-bit multiply, Divide. */
10176 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10177 tmp = load_reg(s, rn);
10178 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10179 if ((op & 0x50) == 0x10) {
10180 /* sdiv, udiv */
d614a513 10181 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10182 goto illegal_op;
47789990 10183 }
9ee6e8bb 10184 if (op & 0x20)
5e3f878a 10185 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10186 else
5e3f878a 10187 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10188 tcg_temp_free_i32(tmp2);
5e3f878a 10189 store_reg(s, rd, tmp);
9ee6e8bb
PB
10190 } else if ((op & 0xe) == 0xc) {
10191 /* Dual multiply accumulate long. */
62b44f05
AR
10192 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10193 tcg_temp_free_i32(tmp);
10194 tcg_temp_free_i32(tmp2);
10195 goto illegal_op;
10196 }
9ee6e8bb 10197 if (op & 1)
5e3f878a
PB
10198 gen_swap_half(tmp2);
10199 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10200 if (op & 0x10) {
5e3f878a 10201 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10202 } else {
5e3f878a 10203 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10204 }
7d1b0095 10205 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10206 /* BUGFIX */
10207 tmp64 = tcg_temp_new_i64();
10208 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10209 tcg_temp_free_i32(tmp);
a7812ae4
PB
10210 gen_addq(s, tmp64, rs, rd);
10211 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10212 tcg_temp_free_i64(tmp64);
2c0262af 10213 } else {
9ee6e8bb
PB
10214 if (op & 0x20) {
10215 /* Unsigned 64-bit multiply */
a7812ae4 10216 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10217 } else {
9ee6e8bb
PB
10218 if (op & 8) {
10219 /* smlalxy */
62b44f05
AR
10220 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10221 tcg_temp_free_i32(tmp2);
10222 tcg_temp_free_i32(tmp);
10223 goto illegal_op;
10224 }
5e3f878a 10225 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10226 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10227 tmp64 = tcg_temp_new_i64();
10228 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10229 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10230 } else {
10231 /* Signed 64-bit multiply */
a7812ae4 10232 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10233 }
b5ff1b31 10234 }
9ee6e8bb
PB
10235 if (op & 4) {
10236 /* umaal */
62b44f05
AR
10237 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10238 tcg_temp_free_i64(tmp64);
10239 goto illegal_op;
10240 }
a7812ae4
PB
10241 gen_addq_lo(s, tmp64, rs);
10242 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10243 } else if (op & 0x40) {
10244 /* 64-bit accumulate. */
a7812ae4 10245 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10246 }
a7812ae4 10247 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10248 tcg_temp_free_i64(tmp64);
5fd46862 10249 }
2c0262af 10250 break;
9ee6e8bb
PB
10251 }
10252 break;
10253 case 6: case 7: case 14: case 15:
10254 /* Coprocessor. */
10255 if (((insn >> 24) & 3) == 3) {
10256 /* Translate into the equivalent ARM encoding. */
f06053e3 10257 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10258 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10259 goto illegal_op;
7dcc1f89 10260 }
6a57f3eb 10261 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10262 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10263 goto illegal_op;
10264 }
9ee6e8bb
PB
10265 } else {
10266 if (insn & (1 << 28))
10267 goto illegal_op;
7dcc1f89 10268 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10269 goto illegal_op;
7dcc1f89 10270 }
9ee6e8bb
PB
10271 }
10272 break;
10273 case 8: case 9: case 10: case 11:
10274 if (insn & (1 << 15)) {
10275 /* Branches, misc control. */
10276 if (insn & 0x5000) {
10277 /* Unconditional branch. */
10278 /* signextend(hw1[10:0]) -> offset[:12]. */
10279 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10280 /* hw1[10:0] -> offset[11:1]. */
10281 offset |= (insn & 0x7ff) << 1;
10282 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10283 offset[24:22] already have the same value because of the
10284 sign extension above. */
10285 offset ^= ((~insn) & (1 << 13)) << 10;
10286 offset ^= ((~insn) & (1 << 11)) << 11;
10287
9ee6e8bb
PB
10288 if (insn & (1 << 14)) {
10289 /* Branch and link. */
3174f8e9 10290 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10291 }
3b46e624 10292
b0109805 10293 offset += s->pc;
9ee6e8bb
PB
10294 if (insn & (1 << 12)) {
10295 /* b/bl */
b0109805 10296 gen_jmp(s, offset);
9ee6e8bb
PB
10297 } else {
10298 /* blx */
b0109805 10299 offset &= ~(uint32_t)2;
be5e7a76 10300 /* thumb2 bx, no need to check */
b0109805 10301 gen_bx_im(s, offset);
2c0262af 10302 }
9ee6e8bb
PB
10303 } else if (((insn >> 23) & 7) == 7) {
10304 /* Misc control */
10305 if (insn & (1 << 13))
10306 goto illegal_op;
10307
10308 if (insn & (1 << 26)) {
37e6456e
PM
10309 if (!(insn & (1 << 20))) {
10310 /* Hypervisor call (v7) */
10311 int imm16 = extract32(insn, 16, 4) << 12
10312 | extract32(insn, 0, 12);
10313 ARCH(7);
10314 if (IS_USER(s)) {
10315 goto illegal_op;
10316 }
10317 gen_hvc(s, imm16);
10318 } else {
10319 /* Secure monitor call (v6+) */
10320 ARCH(6K);
10321 if (IS_USER(s)) {
10322 goto illegal_op;
10323 }
10324 gen_smc(s);
10325 }
2c0262af 10326 } else {
9ee6e8bb
PB
10327 op = (insn >> 20) & 7;
10328 switch (op) {
10329 case 0: /* msr cpsr. */
b53d8923 10330 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10331 tmp = load_reg(s, rn);
10332 addr = tcg_const_i32(insn & 0xff);
10333 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10334 tcg_temp_free_i32(addr);
7d1b0095 10335 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10336 gen_lookup_tb(s);
10337 break;
10338 }
10339 /* fall through */
10340 case 1: /* msr spsr. */
b53d8923 10341 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10342 goto illegal_op;
b53d8923 10343 }
8bfd0550
PM
10344
10345 if (extract32(insn, 5, 1)) {
10346 /* MSR (banked) */
10347 int sysm = extract32(insn, 8, 4) |
10348 (extract32(insn, 4, 1) << 4);
10349 int r = op & 1;
10350
10351 gen_msr_banked(s, r, sysm, rm);
10352 break;
10353 }
10354
10355 /* MSR (for PSRs) */
2fbac54b
FN
10356 tmp = load_reg(s, rn);
10357 if (gen_set_psr(s,
7dcc1f89 10358 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10359 op == 1, tmp))
9ee6e8bb
PB
10360 goto illegal_op;
10361 break;
10362 case 2: /* cps, nop-hint. */
10363 if (((insn >> 8) & 7) == 0) {
10364 gen_nop_hint(s, insn & 0xff);
10365 }
10366 /* Implemented as NOP in user mode. */
10367 if (IS_USER(s))
10368 break;
10369 offset = 0;
10370 imm = 0;
10371 if (insn & (1 << 10)) {
10372 if (insn & (1 << 7))
10373 offset |= CPSR_A;
10374 if (insn & (1 << 6))
10375 offset |= CPSR_I;
10376 if (insn & (1 << 5))
10377 offset |= CPSR_F;
10378 if (insn & (1 << 9))
10379 imm = CPSR_A | CPSR_I | CPSR_F;
10380 }
10381 if (insn & (1 << 8)) {
10382 offset |= 0x1f;
10383 imm |= (insn & 0x1f);
10384 }
10385 if (offset) {
2fbac54b 10386 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10387 }
10388 break;
10389 case 3: /* Special control operations. */
426f5abc 10390 ARCH(7);
9ee6e8bb
PB
10391 op = (insn >> 4) & 0xf;
10392 switch (op) {
10393 case 2: /* clrex */
426f5abc 10394 gen_clrex(s);
9ee6e8bb
PB
10395 break;
10396 case 4: /* dsb */
10397 case 5: /* dmb */
9ee6e8bb 10398 /* These execute as NOPs. */
9ee6e8bb 10399 break;
6df99dec
SS
10400 case 6: /* isb */
10401 /* We need to break the TB after this insn
10402 * to execute self-modifying code correctly
10403 * and also to take any pending interrupts
10404 * immediately.
10405 */
10406 gen_lookup_tb(s);
10407 break;
9ee6e8bb
PB
10408 default:
10409 goto illegal_op;
10410 }
10411 break;
10412 case 4: /* bxj */
10413 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10414 tmp = load_reg(s, rn);
10415 gen_bx(s, tmp);
9ee6e8bb
PB
10416 break;
10417 case 5: /* Exception return. */
b8b45b68
RV
10418 if (IS_USER(s)) {
10419 goto illegal_op;
10420 }
10421 if (rn != 14 || rd != 15) {
10422 goto illegal_op;
10423 }
10424 tmp = load_reg(s, rn);
10425 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10426 gen_exception_return(s, tmp);
10427 break;
8bfd0550
PM
10428 case 6: /* MRS */
10429 if (extract32(insn, 5, 1)) {
10430 /* MRS (banked) */
10431 int sysm = extract32(insn, 16, 4) |
10432 (extract32(insn, 4, 1) << 4);
10433
10434 gen_mrs_banked(s, 0, sysm, rd);
10435 break;
10436 }
10437
10438 /* mrs cpsr */
7d1b0095 10439 tmp = tcg_temp_new_i32();
b53d8923 10440 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10441 addr = tcg_const_i32(insn & 0xff);
10442 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10443 tcg_temp_free_i32(addr);
9ee6e8bb 10444 } else {
9ef39277 10445 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10446 }
8984bd2e 10447 store_reg(s, rd, tmp);
9ee6e8bb 10448 break;
8bfd0550
PM
10449 case 7: /* MRS */
10450 if (extract32(insn, 5, 1)) {
10451 /* MRS (banked) */
10452 int sysm = extract32(insn, 16, 4) |
10453 (extract32(insn, 4, 1) << 4);
10454
10455 gen_mrs_banked(s, 1, sysm, rd);
10456 break;
10457 }
10458
10459 /* mrs spsr. */
9ee6e8bb 10460 /* Not accessible in user mode. */
b53d8923 10461 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10462 goto illegal_op;
b53d8923 10463 }
d9ba4830
PB
10464 tmp = load_cpu_field(spsr);
10465 store_reg(s, rd, tmp);
9ee6e8bb 10466 break;
2c0262af
FB
10467 }
10468 }
9ee6e8bb
PB
10469 } else {
10470 /* Conditional branch. */
10471 op = (insn >> 22) & 0xf;
10472 /* Generate a conditional jump to next instruction. */
10473 s->condlabel = gen_new_label();
39fb730a 10474 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10475 s->condjmp = 1;
10476
10477 /* offset[11:1] = insn[10:0] */
10478 offset = (insn & 0x7ff) << 1;
10479 /* offset[17:12] = insn[21:16]. */
10480 offset |= (insn & 0x003f0000) >> 4;
10481 /* offset[31:20] = insn[26]. */
10482 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10483 /* offset[18] = insn[13]. */
10484 offset |= (insn & (1 << 13)) << 5;
10485 /* offset[19] = insn[11]. */
10486 offset |= (insn & (1 << 11)) << 8;
10487
10488 /* jump to the offset */
b0109805 10489 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10490 }
10491 } else {
10492 /* Data processing immediate. */
10493 if (insn & (1 << 25)) {
10494 if (insn & (1 << 24)) {
10495 if (insn & (1 << 20))
10496 goto illegal_op;
10497 /* Bitfield/Saturate. */
10498 op = (insn >> 21) & 7;
10499 imm = insn & 0x1f;
10500 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10501 if (rn == 15) {
7d1b0095 10502 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10503 tcg_gen_movi_i32(tmp, 0);
10504 } else {
10505 tmp = load_reg(s, rn);
10506 }
9ee6e8bb
PB
10507 switch (op) {
10508 case 2: /* Signed bitfield extract. */
10509 imm++;
10510 if (shift + imm > 32)
10511 goto illegal_op;
10512 if (imm < 32)
6ddbc6e4 10513 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10514 break;
10515 case 6: /* Unsigned bitfield extract. */
10516 imm++;
10517 if (shift + imm > 32)
10518 goto illegal_op;
10519 if (imm < 32)
6ddbc6e4 10520 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10521 break;
10522 case 3: /* Bitfield insert/clear. */
10523 if (imm < shift)
10524 goto illegal_op;
10525 imm = imm + 1 - shift;
10526 if (imm != 32) {
6ddbc6e4 10527 tmp2 = load_reg(s, rd);
d593c48e 10528 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10529 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10530 }
10531 break;
10532 case 7:
10533 goto illegal_op;
10534 default: /* Saturate. */
9ee6e8bb
PB
10535 if (shift) {
10536 if (op & 1)
6ddbc6e4 10537 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10538 else
6ddbc6e4 10539 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10540 }
6ddbc6e4 10541 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10542 if (op & 4) {
10543 /* Unsigned. */
62b44f05
AR
10544 if ((op & 1) && shift == 0) {
10545 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10546 tcg_temp_free_i32(tmp);
10547 tcg_temp_free_i32(tmp2);
10548 goto illegal_op;
10549 }
9ef39277 10550 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10551 } else {
9ef39277 10552 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10553 }
2c0262af 10554 } else {
9ee6e8bb 10555 /* Signed. */
62b44f05
AR
10556 if ((op & 1) && shift == 0) {
10557 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10558 tcg_temp_free_i32(tmp);
10559 tcg_temp_free_i32(tmp2);
10560 goto illegal_op;
10561 }
9ef39277 10562 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10563 } else {
9ef39277 10564 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10565 }
2c0262af 10566 }
b75263d6 10567 tcg_temp_free_i32(tmp2);
9ee6e8bb 10568 break;
2c0262af 10569 }
6ddbc6e4 10570 store_reg(s, rd, tmp);
9ee6e8bb
PB
10571 } else {
10572 imm = ((insn & 0x04000000) >> 15)
10573 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10574 if (insn & (1 << 22)) {
10575 /* 16-bit immediate. */
10576 imm |= (insn >> 4) & 0xf000;
10577 if (insn & (1 << 23)) {
10578 /* movt */
5e3f878a 10579 tmp = load_reg(s, rd);
86831435 10580 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10581 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10582 } else {
9ee6e8bb 10583 /* movw */
7d1b0095 10584 tmp = tcg_temp_new_i32();
5e3f878a 10585 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10586 }
10587 } else {
9ee6e8bb
PB
10588 /* Add/sub 12-bit immediate. */
10589 if (rn == 15) {
b0109805 10590 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10591 if (insn & (1 << 23))
b0109805 10592 offset -= imm;
9ee6e8bb 10593 else
b0109805 10594 offset += imm;
7d1b0095 10595 tmp = tcg_temp_new_i32();
5e3f878a 10596 tcg_gen_movi_i32(tmp, offset);
2c0262af 10597 } else {
5e3f878a 10598 tmp = load_reg(s, rn);
9ee6e8bb 10599 if (insn & (1 << 23))
5e3f878a 10600 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10601 else
5e3f878a 10602 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10603 }
9ee6e8bb 10604 }
5e3f878a 10605 store_reg(s, rd, tmp);
191abaa2 10606 }
9ee6e8bb
PB
10607 } else {
10608 int shifter_out = 0;
10609 /* modified 12-bit immediate. */
10610 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10611 imm = (insn & 0xff);
10612 switch (shift) {
10613 case 0: /* XY */
10614 /* Nothing to do. */
10615 break;
10616 case 1: /* 00XY00XY */
10617 imm |= imm << 16;
10618 break;
10619 case 2: /* XY00XY00 */
10620 imm |= imm << 16;
10621 imm <<= 8;
10622 break;
10623 case 3: /* XYXYXYXY */
10624 imm |= imm << 16;
10625 imm |= imm << 8;
10626 break;
10627 default: /* Rotated constant. */
10628 shift = (shift << 1) | (imm >> 7);
10629 imm |= 0x80;
10630 imm = imm << (32 - shift);
10631 shifter_out = 1;
10632 break;
b5ff1b31 10633 }
7d1b0095 10634 tmp2 = tcg_temp_new_i32();
3174f8e9 10635 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10636 rn = (insn >> 16) & 0xf;
3174f8e9 10637 if (rn == 15) {
7d1b0095 10638 tmp = tcg_temp_new_i32();
3174f8e9
FN
10639 tcg_gen_movi_i32(tmp, 0);
10640 } else {
10641 tmp = load_reg(s, rn);
10642 }
9ee6e8bb
PB
10643 op = (insn >> 21) & 0xf;
10644 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10645 shifter_out, tmp, tmp2))
9ee6e8bb 10646 goto illegal_op;
7d1b0095 10647 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10648 rd = (insn >> 8) & 0xf;
10649 if (rd != 15) {
3174f8e9
FN
10650 store_reg(s, rd, tmp);
10651 } else {
7d1b0095 10652 tcg_temp_free_i32(tmp);
2c0262af 10653 }
2c0262af 10654 }
9ee6e8bb
PB
10655 }
10656 break;
10657 case 12: /* Load/store single data item. */
10658 {
10659 int postinc = 0;
10660 int writeback = 0;
a99caa48 10661 int memidx;
9ee6e8bb 10662 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10663 if (disas_neon_ls_insn(s, insn)) {
c1713132 10664 goto illegal_op;
7dcc1f89 10665 }
9ee6e8bb
PB
10666 break;
10667 }
a2fdc890
PM
10668 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10669 if (rs == 15) {
10670 if (!(insn & (1 << 20))) {
10671 goto illegal_op;
10672 }
10673 if (op != 2) {
10674 /* Byte or halfword load space with dest == r15 : memory hints.
10675 * Catch them early so we don't emit pointless addressing code.
10676 * This space is a mix of:
10677 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10678 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10679 * cores)
10680 * unallocated hints, which must be treated as NOPs
10681 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10682 * which is easiest for the decoding logic
10683 * Some space which must UNDEF
10684 */
10685 int op1 = (insn >> 23) & 3;
10686 int op2 = (insn >> 6) & 0x3f;
10687 if (op & 2) {
10688 goto illegal_op;
10689 }
10690 if (rn == 15) {
02afbf64
PM
10691 /* UNPREDICTABLE, unallocated hint or
10692 * PLD/PLDW/PLI (literal)
10693 */
a2fdc890
PM
10694 return 0;
10695 }
10696 if (op1 & 1) {
02afbf64 10697 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10698 }
10699 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10700 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10701 }
10702 /* UNDEF space, or an UNPREDICTABLE */
10703 return 1;
10704 }
10705 }
a99caa48 10706 memidx = get_mem_index(s);
9ee6e8bb 10707 if (rn == 15) {
7d1b0095 10708 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10709 /* PC relative. */
10710 /* s->pc has already been incremented by 4. */
10711 imm = s->pc & 0xfffffffc;
10712 if (insn & (1 << 23))
10713 imm += insn & 0xfff;
10714 else
10715 imm -= insn & 0xfff;
b0109805 10716 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10717 } else {
b0109805 10718 addr = load_reg(s, rn);
9ee6e8bb
PB
10719 if (insn & (1 << 23)) {
10720 /* Positive offset. */
10721 imm = insn & 0xfff;
b0109805 10722 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10723 } else {
9ee6e8bb 10724 imm = insn & 0xff;
2a0308c5
PM
10725 switch ((insn >> 8) & 0xf) {
10726 case 0x0: /* Shifted Register. */
9ee6e8bb 10727 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10728 if (shift > 3) {
10729 tcg_temp_free_i32(addr);
18c9b560 10730 goto illegal_op;
2a0308c5 10731 }
b26eefb6 10732 tmp = load_reg(s, rm);
9ee6e8bb 10733 if (shift)
b26eefb6 10734 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10735 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10736 tcg_temp_free_i32(tmp);
9ee6e8bb 10737 break;
2a0308c5 10738 case 0xc: /* Negative offset. */
b0109805 10739 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10740 break;
2a0308c5 10741 case 0xe: /* User privilege. */
b0109805 10742 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10743 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10744 break;
2a0308c5 10745 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10746 imm = -imm;
10747 /* Fall through. */
2a0308c5 10748 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10749 postinc = 1;
10750 writeback = 1;
10751 break;
2a0308c5 10752 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10753 imm = -imm;
10754 /* Fall through. */
2a0308c5 10755 case 0xf: /* Pre-increment. */
b0109805 10756 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10757 writeback = 1;
10758 break;
10759 default:
2a0308c5 10760 tcg_temp_free_i32(addr);
b7bcbe95 10761 goto illegal_op;
9ee6e8bb
PB
10762 }
10763 }
10764 }
9ee6e8bb
PB
10765 if (insn & (1 << 20)) {
10766 /* Load. */
5a839c0d 10767 tmp = tcg_temp_new_i32();
a2fdc890 10768 switch (op) {
5a839c0d 10769 case 0:
12dcc321 10770 gen_aa32_ld8u(s, tmp, addr, memidx);
5a839c0d
PM
10771 break;
10772 case 4:
12dcc321 10773 gen_aa32_ld8s(s, tmp, addr, memidx);
5a839c0d
PM
10774 break;
10775 case 1:
12dcc321 10776 gen_aa32_ld16u(s, tmp, addr, memidx);
5a839c0d
PM
10777 break;
10778 case 5:
12dcc321 10779 gen_aa32_ld16s(s, tmp, addr, memidx);
5a839c0d
PM
10780 break;
10781 case 2:
12dcc321 10782 gen_aa32_ld32u(s, tmp, addr, memidx);
5a839c0d 10783 break;
2a0308c5 10784 default:
5a839c0d 10785 tcg_temp_free_i32(tmp);
2a0308c5
PM
10786 tcg_temp_free_i32(addr);
10787 goto illegal_op;
a2fdc890
PM
10788 }
10789 if (rs == 15) {
10790 gen_bx(s, tmp);
9ee6e8bb 10791 } else {
a2fdc890 10792 store_reg(s, rs, tmp);
9ee6e8bb
PB
10793 }
10794 } else {
10795 /* Store. */
b0109805 10796 tmp = load_reg(s, rs);
9ee6e8bb 10797 switch (op) {
5a839c0d 10798 case 0:
12dcc321 10799 gen_aa32_st8(s, tmp, addr, memidx);
5a839c0d
PM
10800 break;
10801 case 1:
12dcc321 10802 gen_aa32_st16(s, tmp, addr, memidx);
5a839c0d
PM
10803 break;
10804 case 2:
12dcc321 10805 gen_aa32_st32(s, tmp, addr, memidx);
5a839c0d 10806 break;
2a0308c5 10807 default:
5a839c0d 10808 tcg_temp_free_i32(tmp);
2a0308c5
PM
10809 tcg_temp_free_i32(addr);
10810 goto illegal_op;
b7bcbe95 10811 }
5a839c0d 10812 tcg_temp_free_i32(tmp);
2c0262af 10813 }
9ee6e8bb 10814 if (postinc)
b0109805
PB
10815 tcg_gen_addi_i32(addr, addr, imm);
10816 if (writeback) {
10817 store_reg(s, rn, addr);
10818 } else {
7d1b0095 10819 tcg_temp_free_i32(addr);
b0109805 10820 }
9ee6e8bb
PB
10821 }
10822 break;
10823 default:
10824 goto illegal_op;
2c0262af 10825 }
9ee6e8bb
PB
10826 return 0;
10827illegal_op:
10828 return 1;
2c0262af
FB
10829}
10830
0ecb72a5 10831static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10832{
10833 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10834 int32_t offset;
10835 int i;
39d5492a
PM
10836 TCGv_i32 tmp;
10837 TCGv_i32 tmp2;
10838 TCGv_i32 addr;
99c475ab 10839
9ee6e8bb
PB
10840 if (s->condexec_mask) {
10841 cond = s->condexec_cond;
bedd2912
JB
10842 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10843 s->condlabel = gen_new_label();
39fb730a 10844 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10845 s->condjmp = 1;
10846 }
9ee6e8bb
PB
10847 }
10848
f9fd40eb 10849 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 10850 s->pc += 2;
b5ff1b31 10851
99c475ab
FB
10852 switch (insn >> 12) {
10853 case 0: case 1:
396e467c 10854
99c475ab
FB
10855 rd = insn & 7;
10856 op = (insn >> 11) & 3;
10857 if (op == 3) {
10858 /* add/subtract */
10859 rn = (insn >> 3) & 7;
396e467c 10860 tmp = load_reg(s, rn);
99c475ab
FB
10861 if (insn & (1 << 10)) {
10862 /* immediate */
7d1b0095 10863 tmp2 = tcg_temp_new_i32();
396e467c 10864 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10865 } else {
10866 /* reg */
10867 rm = (insn >> 6) & 7;
396e467c 10868 tmp2 = load_reg(s, rm);
99c475ab 10869 }
9ee6e8bb
PB
10870 if (insn & (1 << 9)) {
10871 if (s->condexec_mask)
396e467c 10872 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10873 else
72485ec4 10874 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10875 } else {
10876 if (s->condexec_mask)
396e467c 10877 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10878 else
72485ec4 10879 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10880 }
7d1b0095 10881 tcg_temp_free_i32(tmp2);
396e467c 10882 store_reg(s, rd, tmp);
99c475ab
FB
10883 } else {
10884 /* shift immediate */
10885 rm = (insn >> 3) & 7;
10886 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10887 tmp = load_reg(s, rm);
10888 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10889 if (!s->condexec_mask)
10890 gen_logic_CC(tmp);
10891 store_reg(s, rd, tmp);
99c475ab
FB
10892 }
10893 break;
10894 case 2: case 3:
10895 /* arithmetic large immediate */
10896 op = (insn >> 11) & 3;
10897 rd = (insn >> 8) & 0x7;
396e467c 10898 if (op == 0) { /* mov */
7d1b0095 10899 tmp = tcg_temp_new_i32();
396e467c 10900 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10901 if (!s->condexec_mask)
396e467c
FN
10902 gen_logic_CC(tmp);
10903 store_reg(s, rd, tmp);
10904 } else {
10905 tmp = load_reg(s, rd);
7d1b0095 10906 tmp2 = tcg_temp_new_i32();
396e467c
FN
10907 tcg_gen_movi_i32(tmp2, insn & 0xff);
10908 switch (op) {
10909 case 1: /* cmp */
72485ec4 10910 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10911 tcg_temp_free_i32(tmp);
10912 tcg_temp_free_i32(tmp2);
396e467c
FN
10913 break;
10914 case 2: /* add */
10915 if (s->condexec_mask)
10916 tcg_gen_add_i32(tmp, tmp, tmp2);
10917 else
72485ec4 10918 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10919 tcg_temp_free_i32(tmp2);
396e467c
FN
10920 store_reg(s, rd, tmp);
10921 break;
10922 case 3: /* sub */
10923 if (s->condexec_mask)
10924 tcg_gen_sub_i32(tmp, tmp, tmp2);
10925 else
72485ec4 10926 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10927 tcg_temp_free_i32(tmp2);
396e467c
FN
10928 store_reg(s, rd, tmp);
10929 break;
10930 }
99c475ab 10931 }
99c475ab
FB
10932 break;
10933 case 4:
10934 if (insn & (1 << 11)) {
10935 rd = (insn >> 8) & 7;
5899f386
FB
10936 /* load pc-relative. Bit 1 of PC is ignored. */
10937 val = s->pc + 2 + ((insn & 0xff) * 4);
10938 val &= ~(uint32_t)2;
7d1b0095 10939 addr = tcg_temp_new_i32();
b0109805 10940 tcg_gen_movi_i32(addr, val);
c40c8556 10941 tmp = tcg_temp_new_i32();
12dcc321 10942 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7d1b0095 10943 tcg_temp_free_i32(addr);
b0109805 10944 store_reg(s, rd, tmp);
99c475ab
FB
10945 break;
10946 }
10947 if (insn & (1 << 10)) {
10948 /* data processing extended or blx */
10949 rd = (insn & 7) | ((insn >> 4) & 8);
10950 rm = (insn >> 3) & 0xf;
10951 op = (insn >> 8) & 3;
10952 switch (op) {
10953 case 0: /* add */
396e467c
FN
10954 tmp = load_reg(s, rd);
10955 tmp2 = load_reg(s, rm);
10956 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10957 tcg_temp_free_i32(tmp2);
396e467c 10958 store_reg(s, rd, tmp);
99c475ab
FB
10959 break;
10960 case 1: /* cmp */
396e467c
FN
10961 tmp = load_reg(s, rd);
10962 tmp2 = load_reg(s, rm);
72485ec4 10963 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10964 tcg_temp_free_i32(tmp2);
10965 tcg_temp_free_i32(tmp);
99c475ab
FB
10966 break;
10967 case 2: /* mov/cpy */
396e467c
FN
10968 tmp = load_reg(s, rm);
10969 store_reg(s, rd, tmp);
99c475ab
FB
10970 break;
10971 case 3:/* branch [and link] exchange thumb register */
b0109805 10972 tmp = load_reg(s, rm);
99c475ab 10973 if (insn & (1 << 7)) {
be5e7a76 10974 ARCH(5);
99c475ab 10975 val = (uint32_t)s->pc | 1;
7d1b0095 10976 tmp2 = tcg_temp_new_i32();
b0109805
PB
10977 tcg_gen_movi_i32(tmp2, val);
10978 store_reg(s, 14, tmp2);
99c475ab 10979 }
be5e7a76 10980 /* already thumb, no need to check */
d9ba4830 10981 gen_bx(s, tmp);
99c475ab
FB
10982 break;
10983 }
10984 break;
10985 }
10986
10987 /* data processing register */
10988 rd = insn & 7;
10989 rm = (insn >> 3) & 7;
10990 op = (insn >> 6) & 0xf;
10991 if (op == 2 || op == 3 || op == 4 || op == 7) {
10992 /* the shift/rotate ops want the operands backwards */
10993 val = rm;
10994 rm = rd;
10995 rd = val;
10996 val = 1;
10997 } else {
10998 val = 0;
10999 }
11000
396e467c 11001 if (op == 9) { /* neg */
7d1b0095 11002 tmp = tcg_temp_new_i32();
396e467c
FN
11003 tcg_gen_movi_i32(tmp, 0);
11004 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11005 tmp = load_reg(s, rd);
11006 } else {
39d5492a 11007 TCGV_UNUSED_I32(tmp);
396e467c 11008 }
99c475ab 11009
396e467c 11010 tmp2 = load_reg(s, rm);
5899f386 11011 switch (op) {
99c475ab 11012 case 0x0: /* and */
396e467c 11013 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11014 if (!s->condexec_mask)
396e467c 11015 gen_logic_CC(tmp);
99c475ab
FB
11016 break;
11017 case 0x1: /* eor */
396e467c 11018 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11019 if (!s->condexec_mask)
396e467c 11020 gen_logic_CC(tmp);
99c475ab
FB
11021 break;
11022 case 0x2: /* lsl */
9ee6e8bb 11023 if (s->condexec_mask) {
365af80e 11024 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11025 } else {
9ef39277 11026 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11027 gen_logic_CC(tmp2);
9ee6e8bb 11028 }
99c475ab
FB
11029 break;
11030 case 0x3: /* lsr */
9ee6e8bb 11031 if (s->condexec_mask) {
365af80e 11032 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11033 } else {
9ef39277 11034 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11035 gen_logic_CC(tmp2);
9ee6e8bb 11036 }
99c475ab
FB
11037 break;
11038 case 0x4: /* asr */
9ee6e8bb 11039 if (s->condexec_mask) {
365af80e 11040 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11041 } else {
9ef39277 11042 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11043 gen_logic_CC(tmp2);
9ee6e8bb 11044 }
99c475ab
FB
11045 break;
11046 case 0x5: /* adc */
49b4c31e 11047 if (s->condexec_mask) {
396e467c 11048 gen_adc(tmp, tmp2);
49b4c31e
RH
11049 } else {
11050 gen_adc_CC(tmp, tmp, tmp2);
11051 }
99c475ab
FB
11052 break;
11053 case 0x6: /* sbc */
2de68a49 11054 if (s->condexec_mask) {
396e467c 11055 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11056 } else {
11057 gen_sbc_CC(tmp, tmp, tmp2);
11058 }
99c475ab
FB
11059 break;
11060 case 0x7: /* ror */
9ee6e8bb 11061 if (s->condexec_mask) {
f669df27
AJ
11062 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11063 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11064 } else {
9ef39277 11065 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11066 gen_logic_CC(tmp2);
9ee6e8bb 11067 }
99c475ab
FB
11068 break;
11069 case 0x8: /* tst */
396e467c
FN
11070 tcg_gen_and_i32(tmp, tmp, tmp2);
11071 gen_logic_CC(tmp);
99c475ab 11072 rd = 16;
5899f386 11073 break;
99c475ab 11074 case 0x9: /* neg */
9ee6e8bb 11075 if (s->condexec_mask)
396e467c 11076 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11077 else
72485ec4 11078 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11079 break;
11080 case 0xa: /* cmp */
72485ec4 11081 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11082 rd = 16;
11083 break;
11084 case 0xb: /* cmn */
72485ec4 11085 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11086 rd = 16;
11087 break;
11088 case 0xc: /* orr */
396e467c 11089 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11090 if (!s->condexec_mask)
396e467c 11091 gen_logic_CC(tmp);
99c475ab
FB
11092 break;
11093 case 0xd: /* mul */
7b2919a0 11094 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11095 if (!s->condexec_mask)
396e467c 11096 gen_logic_CC(tmp);
99c475ab
FB
11097 break;
11098 case 0xe: /* bic */
f669df27 11099 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11100 if (!s->condexec_mask)
396e467c 11101 gen_logic_CC(tmp);
99c475ab
FB
11102 break;
11103 case 0xf: /* mvn */
396e467c 11104 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11105 if (!s->condexec_mask)
396e467c 11106 gen_logic_CC(tmp2);
99c475ab 11107 val = 1;
5899f386 11108 rm = rd;
99c475ab
FB
11109 break;
11110 }
11111 if (rd != 16) {
396e467c
FN
11112 if (val) {
11113 store_reg(s, rm, tmp2);
11114 if (op != 0xf)
7d1b0095 11115 tcg_temp_free_i32(tmp);
396e467c
FN
11116 } else {
11117 store_reg(s, rd, tmp);
7d1b0095 11118 tcg_temp_free_i32(tmp2);
396e467c
FN
11119 }
11120 } else {
7d1b0095
PM
11121 tcg_temp_free_i32(tmp);
11122 tcg_temp_free_i32(tmp2);
99c475ab
FB
11123 }
11124 break;
11125
11126 case 5:
11127 /* load/store register offset. */
11128 rd = insn & 7;
11129 rn = (insn >> 3) & 7;
11130 rm = (insn >> 6) & 7;
11131 op = (insn >> 9) & 7;
b0109805 11132 addr = load_reg(s, rn);
b26eefb6 11133 tmp = load_reg(s, rm);
b0109805 11134 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11135 tcg_temp_free_i32(tmp);
99c475ab 11136
c40c8556 11137 if (op < 3) { /* store */
b0109805 11138 tmp = load_reg(s, rd);
c40c8556
PM
11139 } else {
11140 tmp = tcg_temp_new_i32();
11141 }
99c475ab
FB
11142
11143 switch (op) {
11144 case 0: /* str */
12dcc321 11145 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11146 break;
11147 case 1: /* strh */
12dcc321 11148 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11149 break;
11150 case 2: /* strb */
12dcc321 11151 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11152 break;
11153 case 3: /* ldrsb */
12dcc321 11154 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11155 break;
11156 case 4: /* ldr */
12dcc321 11157 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11158 break;
11159 case 5: /* ldrh */
12dcc321 11160 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11161 break;
11162 case 6: /* ldrb */
12dcc321 11163 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11164 break;
11165 case 7: /* ldrsh */
12dcc321 11166 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11167 break;
11168 }
c40c8556 11169 if (op >= 3) { /* load */
b0109805 11170 store_reg(s, rd, tmp);
c40c8556
PM
11171 } else {
11172 tcg_temp_free_i32(tmp);
11173 }
7d1b0095 11174 tcg_temp_free_i32(addr);
99c475ab
FB
11175 break;
11176
11177 case 6:
11178 /* load/store word immediate offset */
11179 rd = insn & 7;
11180 rn = (insn >> 3) & 7;
b0109805 11181 addr = load_reg(s, rn);
99c475ab 11182 val = (insn >> 4) & 0x7c;
b0109805 11183 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11184
11185 if (insn & (1 << 11)) {
11186 /* load */
c40c8556 11187 tmp = tcg_temp_new_i32();
12dcc321 11188 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11189 store_reg(s, rd, tmp);
99c475ab
FB
11190 } else {
11191 /* store */
b0109805 11192 tmp = load_reg(s, rd);
12dcc321 11193 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11194 tcg_temp_free_i32(tmp);
99c475ab 11195 }
7d1b0095 11196 tcg_temp_free_i32(addr);
99c475ab
FB
11197 break;
11198
11199 case 7:
11200 /* load/store byte immediate offset */
11201 rd = insn & 7;
11202 rn = (insn >> 3) & 7;
b0109805 11203 addr = load_reg(s, rn);
99c475ab 11204 val = (insn >> 6) & 0x1f;
b0109805 11205 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11206
11207 if (insn & (1 << 11)) {
11208 /* load */
c40c8556 11209 tmp = tcg_temp_new_i32();
12dcc321 11210 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
b0109805 11211 store_reg(s, rd, tmp);
99c475ab
FB
11212 } else {
11213 /* store */
b0109805 11214 tmp = load_reg(s, rd);
12dcc321 11215 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
c40c8556 11216 tcg_temp_free_i32(tmp);
99c475ab 11217 }
7d1b0095 11218 tcg_temp_free_i32(addr);
99c475ab
FB
11219 break;
11220
11221 case 8:
11222 /* load/store halfword immediate offset */
11223 rd = insn & 7;
11224 rn = (insn >> 3) & 7;
b0109805 11225 addr = load_reg(s, rn);
99c475ab 11226 val = (insn >> 5) & 0x3e;
b0109805 11227 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11228
11229 if (insn & (1 << 11)) {
11230 /* load */
c40c8556 11231 tmp = tcg_temp_new_i32();
12dcc321 11232 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
b0109805 11233 store_reg(s, rd, tmp);
99c475ab
FB
11234 } else {
11235 /* store */
b0109805 11236 tmp = load_reg(s, rd);
12dcc321 11237 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
c40c8556 11238 tcg_temp_free_i32(tmp);
99c475ab 11239 }
7d1b0095 11240 tcg_temp_free_i32(addr);
99c475ab
FB
11241 break;
11242
11243 case 9:
11244 /* load/store from stack */
11245 rd = (insn >> 8) & 7;
b0109805 11246 addr = load_reg(s, 13);
99c475ab 11247 val = (insn & 0xff) * 4;
b0109805 11248 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11249
11250 if (insn & (1 << 11)) {
11251 /* load */
c40c8556 11252 tmp = tcg_temp_new_i32();
12dcc321 11253 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11254 store_reg(s, rd, tmp);
99c475ab
FB
11255 } else {
11256 /* store */
b0109805 11257 tmp = load_reg(s, rd);
12dcc321 11258 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11259 tcg_temp_free_i32(tmp);
99c475ab 11260 }
7d1b0095 11261 tcg_temp_free_i32(addr);
99c475ab
FB
11262 break;
11263
11264 case 10:
11265 /* add to high reg */
11266 rd = (insn >> 8) & 7;
5899f386
FB
11267 if (insn & (1 << 11)) {
11268 /* SP */
5e3f878a 11269 tmp = load_reg(s, 13);
5899f386
FB
11270 } else {
11271 /* PC. bit 1 is ignored. */
7d1b0095 11272 tmp = tcg_temp_new_i32();
5e3f878a 11273 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11274 }
99c475ab 11275 val = (insn & 0xff) * 4;
5e3f878a
PB
11276 tcg_gen_addi_i32(tmp, tmp, val);
11277 store_reg(s, rd, tmp);
99c475ab
FB
11278 break;
11279
11280 case 11:
11281 /* misc */
11282 op = (insn >> 8) & 0xf;
11283 switch (op) {
11284 case 0:
11285 /* adjust stack pointer */
b26eefb6 11286 tmp = load_reg(s, 13);
99c475ab
FB
11287 val = (insn & 0x7f) * 4;
11288 if (insn & (1 << 7))
6a0d8a1d 11289 val = -(int32_t)val;
b26eefb6
PB
11290 tcg_gen_addi_i32(tmp, tmp, val);
11291 store_reg(s, 13, tmp);
99c475ab
FB
11292 break;
11293
9ee6e8bb
PB
11294 case 2: /* sign/zero extend. */
11295 ARCH(6);
11296 rd = insn & 7;
11297 rm = (insn >> 3) & 7;
b0109805 11298 tmp = load_reg(s, rm);
9ee6e8bb 11299 switch ((insn >> 6) & 3) {
b0109805
PB
11300 case 0: gen_sxth(tmp); break;
11301 case 1: gen_sxtb(tmp); break;
11302 case 2: gen_uxth(tmp); break;
11303 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11304 }
b0109805 11305 store_reg(s, rd, tmp);
9ee6e8bb 11306 break;
99c475ab
FB
11307 case 4: case 5: case 0xc: case 0xd:
11308 /* push/pop */
b0109805 11309 addr = load_reg(s, 13);
5899f386
FB
11310 if (insn & (1 << 8))
11311 offset = 4;
99c475ab 11312 else
5899f386
FB
11313 offset = 0;
11314 for (i = 0; i < 8; i++) {
11315 if (insn & (1 << i))
11316 offset += 4;
11317 }
11318 if ((insn & (1 << 11)) == 0) {
b0109805 11319 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11320 }
99c475ab
FB
11321 for (i = 0; i < 8; i++) {
11322 if (insn & (1 << i)) {
11323 if (insn & (1 << 11)) {
11324 /* pop */
c40c8556 11325 tmp = tcg_temp_new_i32();
12dcc321 11326 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11327 store_reg(s, i, tmp);
99c475ab
FB
11328 } else {
11329 /* push */
b0109805 11330 tmp = load_reg(s, i);
12dcc321 11331 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11332 tcg_temp_free_i32(tmp);
99c475ab 11333 }
5899f386 11334 /* advance to the next address. */
b0109805 11335 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11336 }
11337 }
39d5492a 11338 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11339 if (insn & (1 << 8)) {
11340 if (insn & (1 << 11)) {
11341 /* pop pc */
c40c8556 11342 tmp = tcg_temp_new_i32();
12dcc321 11343 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11344 /* don't set the pc until the rest of the instruction
11345 has completed */
11346 } else {
11347 /* push lr */
b0109805 11348 tmp = load_reg(s, 14);
12dcc321 11349 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11350 tcg_temp_free_i32(tmp);
99c475ab 11351 }
b0109805 11352 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11353 }
5899f386 11354 if ((insn & (1 << 11)) == 0) {
b0109805 11355 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11356 }
99c475ab 11357 /* write back the new stack pointer */
b0109805 11358 store_reg(s, 13, addr);
99c475ab 11359 /* set the new PC value */
be5e7a76 11360 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11361 store_reg_from_load(s, 15, tmp);
be5e7a76 11362 }
99c475ab
FB
11363 break;
11364
9ee6e8bb
PB
11365 case 1: case 3: case 9: case 11: /* czb */
11366 rm = insn & 7;
d9ba4830 11367 tmp = load_reg(s, rm);
9ee6e8bb
PB
11368 s->condlabel = gen_new_label();
11369 s->condjmp = 1;
11370 if (insn & (1 << 11))
cb63669a 11371 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11372 else
cb63669a 11373 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11374 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11375 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11376 val = (uint32_t)s->pc + 2;
11377 val += offset;
11378 gen_jmp(s, val);
11379 break;
11380
11381 case 15: /* IT, nop-hint. */
11382 if ((insn & 0xf) == 0) {
11383 gen_nop_hint(s, (insn >> 4) & 0xf);
11384 break;
11385 }
11386 /* If Then. */
11387 s->condexec_cond = (insn >> 4) & 0xe;
11388 s->condexec_mask = insn & 0x1f;
11389 /* No actual code generated for this insn, just setup state. */
11390 break;
11391
06c949e6 11392 case 0xe: /* bkpt */
d4a2dc67
PM
11393 {
11394 int imm8 = extract32(insn, 0, 8);
be5e7a76 11395 ARCH(5);
73710361
GB
11396 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11397 default_exception_el(s));
06c949e6 11398 break;
d4a2dc67 11399 }
06c949e6 11400
9ee6e8bb
PB
11401 case 0xa: /* rev */
11402 ARCH(6);
11403 rn = (insn >> 3) & 0x7;
11404 rd = insn & 0x7;
b0109805 11405 tmp = load_reg(s, rn);
9ee6e8bb 11406 switch ((insn >> 6) & 3) {
66896cb8 11407 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11408 case 1: gen_rev16(tmp); break;
11409 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
11410 default: goto illegal_op;
11411 }
b0109805 11412 store_reg(s, rd, tmp);
9ee6e8bb
PB
11413 break;
11414
d9e028c1
PM
11415 case 6:
11416 switch ((insn >> 5) & 7) {
11417 case 2:
11418 /* setend */
11419 ARCH(6);
9886ecdf
PB
11420 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11421 gen_helper_setend(cpu_env);
11422 s->is_jmp = DISAS_UPDATE;
d9e028c1 11423 }
9ee6e8bb 11424 break;
d9e028c1
PM
11425 case 3:
11426 /* cps */
11427 ARCH(6);
11428 if (IS_USER(s)) {
11429 break;
8984bd2e 11430 }
b53d8923 11431 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11432 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11433 /* FAULTMASK */
11434 if (insn & 1) {
11435 addr = tcg_const_i32(19);
11436 gen_helper_v7m_msr(cpu_env, addr, tmp);
11437 tcg_temp_free_i32(addr);
11438 }
11439 /* PRIMASK */
11440 if (insn & 2) {
11441 addr = tcg_const_i32(16);
11442 gen_helper_v7m_msr(cpu_env, addr, tmp);
11443 tcg_temp_free_i32(addr);
11444 }
11445 tcg_temp_free_i32(tmp);
11446 gen_lookup_tb(s);
11447 } else {
11448 if (insn & (1 << 4)) {
11449 shift = CPSR_A | CPSR_I | CPSR_F;
11450 } else {
11451 shift = 0;
11452 }
11453 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11454 }
d9e028c1
PM
11455 break;
11456 default:
11457 goto undef;
9ee6e8bb
PB
11458 }
11459 break;
11460
99c475ab
FB
11461 default:
11462 goto undef;
11463 }
11464 break;
11465
11466 case 12:
a7d3970d 11467 {
99c475ab 11468 /* load/store multiple */
39d5492a
PM
11469 TCGv_i32 loaded_var;
11470 TCGV_UNUSED_I32(loaded_var);
99c475ab 11471 rn = (insn >> 8) & 0x7;
b0109805 11472 addr = load_reg(s, rn);
99c475ab
FB
11473 for (i = 0; i < 8; i++) {
11474 if (insn & (1 << i)) {
99c475ab
FB
11475 if (insn & (1 << 11)) {
11476 /* load */
c40c8556 11477 tmp = tcg_temp_new_i32();
12dcc321 11478 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11479 if (i == rn) {
11480 loaded_var = tmp;
11481 } else {
11482 store_reg(s, i, tmp);
11483 }
99c475ab
FB
11484 } else {
11485 /* store */
b0109805 11486 tmp = load_reg(s, i);
12dcc321 11487 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11488 tcg_temp_free_i32(tmp);
99c475ab 11489 }
5899f386 11490 /* advance to the next address */
b0109805 11491 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11492 }
11493 }
b0109805 11494 if ((insn & (1 << rn)) == 0) {
a7d3970d 11495 /* base reg not in list: base register writeback */
b0109805
PB
11496 store_reg(s, rn, addr);
11497 } else {
a7d3970d
PM
11498 /* base reg in list: if load, complete it now */
11499 if (insn & (1 << 11)) {
11500 store_reg(s, rn, loaded_var);
11501 }
7d1b0095 11502 tcg_temp_free_i32(addr);
b0109805 11503 }
99c475ab 11504 break;
a7d3970d 11505 }
99c475ab
FB
11506 case 13:
11507 /* conditional branch or swi */
11508 cond = (insn >> 8) & 0xf;
11509 if (cond == 0xe)
11510 goto undef;
11511
11512 if (cond == 0xf) {
11513 /* swi */
eaed129d 11514 gen_set_pc_im(s, s->pc);
d4a2dc67 11515 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11516 s->is_jmp = DISAS_SWI;
99c475ab
FB
11517 break;
11518 }
11519 /* generate a conditional jump to next instruction */
e50e6a20 11520 s->condlabel = gen_new_label();
39fb730a 11521 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11522 s->condjmp = 1;
99c475ab
FB
11523
11524 /* jump to the offset */
5899f386 11525 val = (uint32_t)s->pc + 2;
99c475ab 11526 offset = ((int32_t)insn << 24) >> 24;
5899f386 11527 val += offset << 1;
8aaca4c0 11528 gen_jmp(s, val);
99c475ab
FB
11529 break;
11530
11531 case 14:
358bf29e 11532 if (insn & (1 << 11)) {
9ee6e8bb
PB
11533 if (disas_thumb2_insn(env, s, insn))
11534 goto undef32;
358bf29e
PB
11535 break;
11536 }
9ee6e8bb 11537 /* unconditional branch */
99c475ab
FB
11538 val = (uint32_t)s->pc;
11539 offset = ((int32_t)insn << 21) >> 21;
11540 val += (offset << 1) + 2;
8aaca4c0 11541 gen_jmp(s, val);
99c475ab
FB
11542 break;
11543
11544 case 15:
9ee6e8bb 11545 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11546 goto undef32;
9ee6e8bb 11547 break;
99c475ab
FB
11548 }
11549 return;
9ee6e8bb 11550undef32:
73710361
GB
11551 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11552 default_exception_el(s));
9ee6e8bb
PB
11553 return;
11554illegal_op:
99c475ab 11555undef:
73710361
GB
11556 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11557 default_exception_el(s));
99c475ab
FB
11558}
11559
541ebcd4
PM
11560static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11561{
11562 /* Return true if the insn at dc->pc might cross a page boundary.
11563 * (False positives are OK, false negatives are not.)
11564 */
11565 uint16_t insn;
11566
11567 if ((s->pc & 3) == 0) {
11568 /* At a 4-aligned address we can't be crossing a page */
11569 return false;
11570 }
11571
11572 /* This must be a Thumb insn */
f9fd40eb 11573 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11574
11575 if ((insn >> 11) >= 0x1d) {
11576 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11577 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11578 * end up actually treating this as two 16-bit insns (see the
11579 * code at the start of disas_thumb2_insn()) but we don't bother
11580 * to check for that as it is unlikely, and false positives here
11581 * are harmless.
11582 */
11583 return true;
11584 }
11585 /* Definitely a 16-bit insn, can't be crossing a page. */
11586 return false;
11587}
11588
/* Generate intermediate (TCG) code for basic block 'tb'.
 *
 * This is the top-level A32/T32 translation loop: it initialises a
 * DisasContext from the TB flags, then disassembles instructions one at
 * a time (disas_thumb_insn()/disas_arm_insn()) until something forces
 * the TB to end, and finally emits the TB epilogue appropriate to the
 * reason translation stopped (dc->is_jmp).  On return tb->size and
 * tb->icount describe what was generated.
 */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Decode state cached from the TB flags: these must fully determine
     * how every insn in this TB is translated, since TBs are looked up
     * by (pc, flags).
     */
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    /* Scratch TCG temporaries shared by the VFP/Neon/iwMMXt decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
        /* Record pc and condexec bits so restore_state_to_opc() can
         * recover them if an exception is raised mid-TB.
         */
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        /* CPU-internal (architectural) breakpoint: let the
                         * helper decide at run time whether it really hits.
                         */
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        /* gdbstub breakpoint: always raise EXCP_DEBUG here. */
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing. */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: consume one condition bit
             * per insn, and clear the condition once the mask runs out.
             */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        /* Close the "condition failed" branch target for a conditional
         * insn that did not itself end the TB.
         */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            /* "Condition failed" codepath: fall through to the next insn. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->sctlr_b << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
11972
/* Human-readable names for the AArch32 CPSR.M mode field (values 0x0-0xf),
 * indexed by (psr & 0xf) in arm_cpu_dump_state(). "???" marks reserved
 * encodings. The table is read-only, so both the pointers and the array
 * itself are const-qualified to keep it in read-only storage.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 11977
878096ee
AF
11978void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11979 int flags)
2c0262af 11980{
878096ee
AF
11981 ARMCPU *cpu = ARM_CPU(cs);
11982 CPUARMState *env = &cpu->env;
2c0262af 11983 int i;
b5ff1b31 11984 uint32_t psr;
06e5cf7a 11985 const char *ns_status;
2c0262af 11986
17731115
PM
11987 if (is_a64(env)) {
11988 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11989 return;
11990 }
11991
2c0262af 11992 for(i=0;i<16;i++) {
7fe48483 11993 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11994 if ((i % 4) == 3)
7fe48483 11995 cpu_fprintf(f, "\n");
2c0262af 11996 else
7fe48483 11997 cpu_fprintf(f, " ");
2c0262af 11998 }
b5ff1b31 11999 psr = cpsr_read(env);
06e5cf7a
PM
12000
12001 if (arm_feature(env, ARM_FEATURE_EL3) &&
12002 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12003 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12004 } else {
12005 ns_status = "";
12006 }
12007
12008 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 12009 psr,
b5ff1b31
FB
12010 psr & (1 << 31) ? 'N' : '-',
12011 psr & (1 << 30) ? 'Z' : '-',
12012 psr & (1 << 29) ? 'C' : '-',
12013 psr & (1 << 28) ? 'V' : '-',
5fafdf24 12014 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 12015 ns_status,
b5ff1b31 12016 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 12017
f2617cfc
PM
12018 if (flags & CPU_DUMP_FPU) {
12019 int numvfpregs = 0;
12020 if (arm_feature(env, ARM_FEATURE_VFP)) {
12021 numvfpregs += 16;
12022 }
12023 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12024 numvfpregs += 16;
12025 }
12026 for (i = 0; i < numvfpregs; i++) {
12027 uint64_t v = float64_val(env->vfp.regs[i]);
12028 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12029 i * 2, (uint32_t)v,
12030 i * 2 + 1, (uint32_t)(v >> 32),
12031 i, v);
12032 }
12033 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12034 }
2c0262af 12035}
a6b025d3 12036
bad729e2
RH
12037void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12038 target_ulong *data)
d2856f1a 12039{
3926cc84 12040 if (is_a64(env)) {
bad729e2 12041 env->pc = data[0];
40f860cd 12042 env->condexec_bits = 0;
3926cc84 12043 } else {
bad729e2
RH
12044 env->regs[15] = data[0];
12045 env->condexec_bits = data[1];
3926cc84 12046 }
d2856f1a 12047}