]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
exec: extract exec/tb-context.h
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
57fec1fe 26#include "tcg-op.h"
1de7afc9 27#include "qemu/log.h"
534df156 28#include "qemu/bitops.h"
1d854765 29#include "arm_ldst.h"
1497c961 30
2ef6175a
RH
31#include "exec/helper-proto.h"
32#include "exec/helper-gen.h"
2c0262af 33
a7e30d84 34#include "trace-tcg.h"
508127e2 35#include "exec/log.h"
a7e30d84
LV
36
37
2b51668f
PM
38#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
39#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 40/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 41#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 42#define ENABLE_ARCH_5J 0
2b51668f
PM
43#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
44#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
45#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
46#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
47#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 48
86753403 49#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 50
f570c61e 51#include "translate.h"
e12ce78d 52
b5ff1b31
FB
53#if defined(CONFIG_USER_ONLY)
54#define IS_USER(s) 1
55#else
56#define IS_USER(s) (s->user)
57#endif
58
1bcea73e 59TCGv_env cpu_env;
ad69471c 60/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 61static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 62static TCGv_i32 cpu_R[16];
78bcaa3e
RH
63TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
64TCGv_i64 cpu_exclusive_addr;
65TCGv_i64 cpu_exclusive_val;
426f5abc 66#ifdef CONFIG_USER_ONLY
78bcaa3e
RH
67TCGv_i64 cpu_exclusive_test;
68TCGv_i32 cpu_exclusive_info;
426f5abc 69#endif
ad69471c 70
b26eefb6 71/* FIXME: These should be removed. */
39d5492a 72static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 73static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 74
022c62cb 75#include "exec/gen-icount.h"
2e70f6ef 76
155c3eac
FN
77static const char *regnames[] =
78 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
79 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
80
b26eefb6
PB
81/* initialize TCG globals. */
82void arm_translate_init(void)
83{
155c3eac
FN
84 int i;
85
a7812ae4
PB
86 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
87
155c3eac 88 for (i = 0; i < 16; i++) {
e1ccc054 89 cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 90 offsetof(CPUARMState, regs[i]),
155c3eac
FN
91 regnames[i]);
92 }
e1ccc054
RH
93 cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
94 cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
95 cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
96 cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
66c374de 97
e1ccc054 98 cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 99 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
e1ccc054 100 cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 101 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 102#ifdef CONFIG_USER_ONLY
e1ccc054 103 cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
0ecb72a5 104 offsetof(CPUARMState, exclusive_test), "exclusive_test");
e1ccc054 105 cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
0ecb72a5 106 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 107#endif
155c3eac 108
14ade10f 109 a64_translate_init();
b26eefb6
PB
110}
111
579d21cc
PM
112static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
113{
114 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
115 * insns:
116 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
117 * otherwise, access as if at PL0.
118 */
119 switch (s->mmu_idx) {
120 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
121 case ARMMMUIdx_S12NSE0:
122 case ARMMMUIdx_S12NSE1:
123 return ARMMMUIdx_S12NSE0;
124 case ARMMMUIdx_S1E3:
125 case ARMMMUIdx_S1SE0:
126 case ARMMMUIdx_S1SE1:
127 return ARMMMUIdx_S1SE0;
128 case ARMMMUIdx_S2NS:
129 default:
130 g_assert_not_reached();
131 }
132}
133
39d5492a 134static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 135{
39d5492a 136 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
0ecb72a5 141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 142
39d5492a 143static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 146 tcg_temp_free_i32(var);
d9ba4830
PB
147}
148
149#define store_cpu_field(var, name) \
0ecb72a5 150 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 151
b26eefb6 152/* Set a variable to the value of a CPU register. */
39d5492a 153static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
154{
155 if (reg == 15) {
156 uint32_t addr;
b90372ad 157 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 169static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 170{
39d5492a 171 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
39d5492a 178static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
155c3eac 184 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 185 tcg_temp_free_i32(var);
b26eefb6
PB
186}
187
b26eefb6 188/* Value extensions. */
86831435
PB
189#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
191#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
1497c961
PB
194#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
39d5492a 198static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 199{
39d5492a 200 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 201 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
d4a2dc67 207static void gen_exception_internal(int excp)
d9ba4830 208{
d4a2dc67
PM
209 TCGv_i32 tcg_excp = tcg_const_i32(excp);
210
211 assert(excp_is_internal(excp));
212 gen_helper_exception_internal(cpu_env, tcg_excp);
213 tcg_temp_free_i32(tcg_excp);
214}
215
73710361 216static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
217{
218 TCGv_i32 tcg_excp = tcg_const_i32(excp);
219 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 220 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 221
73710361
GB
222 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
223 tcg_syn, tcg_el);
224
225 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
226 tcg_temp_free_i32(tcg_syn);
227 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
228}
229
50225ad0
PM
230static void gen_ss_advance(DisasContext *s)
231{
232 /* If the singlestep state is Active-not-pending, advance to
233 * Active-pending.
234 */
235 if (s->ss_active) {
236 s->pstate_ss = 0;
237 gen_helper_clear_pstate_ss(cpu_env);
238 }
239}
240
241static void gen_step_complete_exception(DisasContext *s)
242{
243 /* We just completed step of an insn. Move from Active-not-pending
244 * to Active-pending, and then also take the swstep exception.
245 * This corresponds to making the (IMPDEF) choice to prioritize
246 * swstep exceptions over asynchronous exceptions taken to an exception
247 * level where debug is disabled. This choice has the advantage that
248 * we do not need to maintain internal state corresponding to the
249 * ISV/EX syndrome bits between completion of the step and generation
250 * of the exception, and our syndrome information is always correct.
251 */
252 gen_ss_advance(s);
73710361
GB
253 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
254 default_exception_el(s));
50225ad0
PM
255 s->is_jmp = DISAS_EXC;
256}
257
39d5492a 258static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 259{
39d5492a
PM
260 TCGv_i32 tmp1 = tcg_temp_new_i32();
261 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
262 tcg_gen_ext16s_i32(tmp1, a);
263 tcg_gen_ext16s_i32(tmp2, b);
3670669c 264 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 265 tcg_temp_free_i32(tmp2);
3670669c
PB
266 tcg_gen_sari_i32(a, a, 16);
267 tcg_gen_sari_i32(b, b, 16);
268 tcg_gen_mul_i32(b, b, a);
269 tcg_gen_mov_i32(a, tmp1);
7d1b0095 270 tcg_temp_free_i32(tmp1);
3670669c
PB
271}
272
273/* Byteswap each halfword. */
39d5492a 274static void gen_rev16(TCGv_i32 var)
3670669c 275{
39d5492a 276 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_andi_i32(var, var, 0xff00ff00);
281 tcg_gen_or_i32(var, var, tmp);
7d1b0095 282 tcg_temp_free_i32(tmp);
3670669c
PB
283}
284
285/* Byteswap low halfword and sign extend. */
39d5492a 286static void gen_revsh(TCGv_i32 var)
3670669c 287{
1a855029
AJ
288 tcg_gen_ext16u_i32(var, var);
289 tcg_gen_bswap16_i32(var, var);
290 tcg_gen_ext16s_i32(var, var);
3670669c
PB
291}
292
293/* Unsigned bitfield extract. */
39d5492a 294static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
295{
296 if (shift)
297 tcg_gen_shri_i32(var, var, shift);
298 tcg_gen_andi_i32(var, var, mask);
299}
300
301/* Signed bitfield extract. */
39d5492a 302static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
303{
304 uint32_t signbit;
305
306 if (shift)
307 tcg_gen_sari_i32(var, var, shift);
308 if (shift + width < 32) {
309 signbit = 1u << (width - 1);
310 tcg_gen_andi_i32(var, var, (1u << width) - 1);
311 tcg_gen_xori_i32(var, var, signbit);
312 tcg_gen_subi_i32(var, var, signbit);
313 }
314}
315
838fa72d 316/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 317static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 318{
838fa72d
AJ
319 TCGv_i64 tmp64 = tcg_temp_new_i64();
320
321 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 322 tcg_temp_free_i32(b);
838fa72d
AJ
323 tcg_gen_shli_i64(tmp64, tmp64, 32);
324 tcg_gen_add_i64(a, tmp64, a);
325
326 tcg_temp_free_i64(tmp64);
327 return a;
328}
329
330/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 331static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
332{
333 TCGv_i64 tmp64 = tcg_temp_new_i64();
334
335 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 336 tcg_temp_free_i32(b);
838fa72d
AJ
337 tcg_gen_shli_i64(tmp64, tmp64, 32);
338 tcg_gen_sub_i64(a, tmp64, a);
339
340 tcg_temp_free_i64(tmp64);
341 return a;
3670669c
PB
342}
343
5e3f878a 344/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 345static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 346{
39d5492a
PM
347 TCGv_i32 lo = tcg_temp_new_i32();
348 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 349 TCGv_i64 ret;
5e3f878a 350
831d7fe8 351 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 352 tcg_temp_free_i32(a);
7d1b0095 353 tcg_temp_free_i32(b);
831d7fe8
RH
354
355 ret = tcg_temp_new_i64();
356 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
357 tcg_temp_free_i32(lo);
358 tcg_temp_free_i32(hi);
831d7fe8
RH
359
360 return ret;
5e3f878a
PB
361}
362
39d5492a 363static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 364{
39d5492a
PM
365 TCGv_i32 lo = tcg_temp_new_i32();
366 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 367 TCGv_i64 ret;
5e3f878a 368
831d7fe8 369 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 370 tcg_temp_free_i32(a);
7d1b0095 371 tcg_temp_free_i32(b);
831d7fe8
RH
372
373 ret = tcg_temp_new_i64();
374 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
375 tcg_temp_free_i32(lo);
376 tcg_temp_free_i32(hi);
831d7fe8
RH
377
378 return ret;
5e3f878a
PB
379}
380
8f01245e 381/* Swap low and high halfwords. */
39d5492a 382static void gen_swap_half(TCGv_i32 var)
8f01245e 383{
39d5492a 384 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
385 tcg_gen_shri_i32(tmp, var, 16);
386 tcg_gen_shli_i32(var, var, 16);
387 tcg_gen_or_i32(var, var, tmp);
7d1b0095 388 tcg_temp_free_i32(tmp);
8f01245e
PB
389}
390
b26eefb6
PB
391/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
392 tmp = (t0 ^ t1) & 0x8000;
393 t0 &= ~0x8000;
394 t1 &= ~0x8000;
395 t0 = (t0 + t1) ^ tmp;
396 */
397
39d5492a 398static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 399{
39d5492a 400 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andi_i32(tmp, tmp, 0x8000);
403 tcg_gen_andi_i32(t0, t0, ~0x8000);
404 tcg_gen_andi_i32(t1, t1, ~0x8000);
405 tcg_gen_add_i32(t0, t0, t1);
406 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
407 tcg_temp_free_i32(tmp);
408 tcg_temp_free_i32(t1);
b26eefb6
PB
409}
410
411/* Set CF to the top bit of var. */
39d5492a 412static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 413{
66c374de 414 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
415}
416
417/* Set N and Z flags from var. */
39d5492a 418static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 419{
66c374de
AJ
420 tcg_gen_mov_i32(cpu_NF, var);
421 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
422}
423
424/* T0 += T1 + CF. */
39d5492a 425static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 426{
396e467c 427 tcg_gen_add_i32(t0, t0, t1);
66c374de 428 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
429}
430
e9bb4aa9 431/* dest = T0 + T1 + CF. */
39d5492a 432static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 433{
e9bb4aa9 434 tcg_gen_add_i32(dest, t0, t1);
66c374de 435 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
436}
437
3670669c 438/* dest = T0 - T1 + CF - 1. */
39d5492a 439static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 440{
3670669c 441 tcg_gen_sub_i32(dest, t0, t1);
66c374de 442 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 443 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
444}
445
72485ec4 446/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 447static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 448{
39d5492a 449 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
450 tcg_gen_movi_i32(tmp, 0);
451 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 452 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 453 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
454 tcg_gen_xor_i32(tmp, t0, t1);
455 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
456 tcg_temp_free_i32(tmp);
457 tcg_gen_mov_i32(dest, cpu_NF);
458}
459
49b4c31e 460/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 461static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 462{
39d5492a 463 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
464 if (TCG_TARGET_HAS_add2_i32) {
465 tcg_gen_movi_i32(tmp, 0);
466 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 467 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
468 } else {
469 TCGv_i64 q0 = tcg_temp_new_i64();
470 TCGv_i64 q1 = tcg_temp_new_i64();
471 tcg_gen_extu_i32_i64(q0, t0);
472 tcg_gen_extu_i32_i64(q1, t1);
473 tcg_gen_add_i64(q0, q0, q1);
474 tcg_gen_extu_i32_i64(q1, cpu_CF);
475 tcg_gen_add_i64(q0, q0, q1);
476 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
477 tcg_temp_free_i64(q0);
478 tcg_temp_free_i64(q1);
479 }
480 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
481 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
482 tcg_gen_xor_i32(tmp, t0, t1);
483 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
484 tcg_temp_free_i32(tmp);
485 tcg_gen_mov_i32(dest, cpu_NF);
486}
487
72485ec4 488/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 489static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 490{
39d5492a 491 TCGv_i32 tmp;
72485ec4
AJ
492 tcg_gen_sub_i32(cpu_NF, t0, t1);
493 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
494 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
495 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
496 tmp = tcg_temp_new_i32();
497 tcg_gen_xor_i32(tmp, t0, t1);
498 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
499 tcg_temp_free_i32(tmp);
500 tcg_gen_mov_i32(dest, cpu_NF);
501}
502
e77f0832 503/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 504static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 505{
39d5492a 506 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
507 tcg_gen_not_i32(tmp, t1);
508 gen_adc_CC(dest, t0, tmp);
39d5492a 509 tcg_temp_free_i32(tmp);
2de68a49
RH
510}
511
365af80e 512#define GEN_SHIFT(name) \
39d5492a 513static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 514{ \
39d5492a 515 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
516 tmp1 = tcg_temp_new_i32(); \
517 tcg_gen_andi_i32(tmp1, t1, 0xff); \
518 tmp2 = tcg_const_i32(0); \
519 tmp3 = tcg_const_i32(0x1f); \
520 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
521 tcg_temp_free_i32(tmp3); \
522 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
523 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
524 tcg_temp_free_i32(tmp2); \
525 tcg_temp_free_i32(tmp1); \
526}
527GEN_SHIFT(shl)
528GEN_SHIFT(shr)
529#undef GEN_SHIFT
530
39d5492a 531static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 532{
39d5492a 533 TCGv_i32 tmp1, tmp2;
365af80e
AJ
534 tmp1 = tcg_temp_new_i32();
535 tcg_gen_andi_i32(tmp1, t1, 0xff);
536 tmp2 = tcg_const_i32(0x1f);
537 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
538 tcg_temp_free_i32(tmp2);
539 tcg_gen_sar_i32(dest, t0, tmp1);
540 tcg_temp_free_i32(tmp1);
541}
542
39d5492a 543static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 544{
39d5492a
PM
545 TCGv_i32 c0 = tcg_const_i32(0);
546 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
547 tcg_gen_neg_i32(tmp, src);
548 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
549 tcg_temp_free_i32(c0);
550 tcg_temp_free_i32(tmp);
551}
ad69471c 552
39d5492a 553static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 554{
9a119ff6 555 if (shift == 0) {
66c374de 556 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 557 } else {
66c374de
AJ
558 tcg_gen_shri_i32(cpu_CF, var, shift);
559 if (shift != 31) {
560 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
561 }
9a119ff6 562 }
9a119ff6 563}
b26eefb6 564
9a119ff6 565/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
566static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
567 int shift, int flags)
9a119ff6
PB
568{
569 switch (shiftop) {
570 case 0: /* LSL */
571 if (shift != 0) {
572 if (flags)
573 shifter_out_im(var, 32 - shift);
574 tcg_gen_shli_i32(var, var, shift);
575 }
576 break;
577 case 1: /* LSR */
578 if (shift == 0) {
579 if (flags) {
66c374de 580 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
581 }
582 tcg_gen_movi_i32(var, 0);
583 } else {
584 if (flags)
585 shifter_out_im(var, shift - 1);
586 tcg_gen_shri_i32(var, var, shift);
587 }
588 break;
589 case 2: /* ASR */
590 if (shift == 0)
591 shift = 32;
592 if (flags)
593 shifter_out_im(var, shift - 1);
594 if (shift == 32)
595 shift = 31;
596 tcg_gen_sari_i32(var, var, shift);
597 break;
598 case 3: /* ROR/RRX */
599 if (shift != 0) {
600 if (flags)
601 shifter_out_im(var, shift - 1);
f669df27 602 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 603 } else {
39d5492a 604 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 605 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
606 if (flags)
607 shifter_out_im(var, 0);
608 tcg_gen_shri_i32(var, var, 1);
b26eefb6 609 tcg_gen_or_i32(var, var, tmp);
7d1b0095 610 tcg_temp_free_i32(tmp);
b26eefb6
PB
611 }
612 }
613};
614
39d5492a
PM
615static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
616 TCGv_i32 shift, int flags)
8984bd2e
PB
617{
618 if (flags) {
619 switch (shiftop) {
9ef39277
BS
620 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
621 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
622 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
623 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
624 }
625 } else {
626 switch (shiftop) {
365af80e
AJ
627 case 0:
628 gen_shl(var, var, shift);
629 break;
630 case 1:
631 gen_shr(var, var, shift);
632 break;
633 case 2:
634 gen_sar(var, var, shift);
635 break;
f669df27
AJ
636 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
637 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
638 }
639 }
7d1b0095 640 tcg_temp_free_i32(shift);
8984bd2e
PB
641}
642
6ddbc6e4
PB
643#define PAS_OP(pfx) \
644 switch (op2) { \
645 case 0: gen_pas_helper(glue(pfx,add16)); break; \
646 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
647 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
648 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
649 case 4: gen_pas_helper(glue(pfx,add8)); break; \
650 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
651 }
39d5492a 652static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 653{
a7812ae4 654 TCGv_ptr tmp;
6ddbc6e4
PB
655
656 switch (op1) {
657#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
658 case 1:
a7812ae4 659 tmp = tcg_temp_new_ptr();
0ecb72a5 660 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 661 PAS_OP(s)
b75263d6 662 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
663 break;
664 case 5:
a7812ae4 665 tmp = tcg_temp_new_ptr();
0ecb72a5 666 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 667 PAS_OP(u)
b75263d6 668 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
669 break;
670#undef gen_pas_helper
671#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
672 case 2:
673 PAS_OP(q);
674 break;
675 case 3:
676 PAS_OP(sh);
677 break;
678 case 6:
679 PAS_OP(uq);
680 break;
681 case 7:
682 PAS_OP(uh);
683 break;
684#undef gen_pas_helper
685 }
686}
9ee6e8bb
PB
687#undef PAS_OP
688
6ddbc6e4
PB
689/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
690#define PAS_OP(pfx) \
ed89a2f1 691 switch (op1) { \
6ddbc6e4
PB
692 case 0: gen_pas_helper(glue(pfx,add8)); break; \
693 case 1: gen_pas_helper(glue(pfx,add16)); break; \
694 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
695 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
696 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
697 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
698 }
39d5492a 699static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 700{
a7812ae4 701 TCGv_ptr tmp;
6ddbc6e4 702
ed89a2f1 703 switch (op2) {
6ddbc6e4
PB
704#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
705 case 0:
a7812ae4 706 tmp = tcg_temp_new_ptr();
0ecb72a5 707 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 708 PAS_OP(s)
b75263d6 709 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
710 break;
711 case 4:
a7812ae4 712 tmp = tcg_temp_new_ptr();
0ecb72a5 713 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 714 PAS_OP(u)
b75263d6 715 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
716 break;
717#undef gen_pas_helper
718#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
719 case 1:
720 PAS_OP(q);
721 break;
722 case 2:
723 PAS_OP(sh);
724 break;
725 case 5:
726 PAS_OP(uq);
727 break;
728 case 6:
729 PAS_OP(uh);
730 break;
731#undef gen_pas_helper
732 }
733}
9ee6e8bb
PB
734#undef PAS_OP
735
39fb730a 736/*
6c2c63d3 737 * Generate a conditional based on ARM condition code cc.
39fb730a
AG
738 * This is common between ARM and Aarch64 targets.
739 */
6c2c63d3 740void arm_test_cc(DisasCompare *cmp, int cc)
d9ba4830 741{
6c2c63d3
RH
742 TCGv_i32 value;
743 TCGCond cond;
744 bool global = true;
d9ba4830 745
d9ba4830
PB
746 switch (cc) {
747 case 0: /* eq: Z */
d9ba4830 748 case 1: /* ne: !Z */
6c2c63d3
RH
749 cond = TCG_COND_EQ;
750 value = cpu_ZF;
d9ba4830 751 break;
6c2c63d3 752
d9ba4830 753 case 2: /* cs: C */
d9ba4830 754 case 3: /* cc: !C */
6c2c63d3
RH
755 cond = TCG_COND_NE;
756 value = cpu_CF;
d9ba4830 757 break;
6c2c63d3 758
d9ba4830 759 case 4: /* mi: N */
d9ba4830 760 case 5: /* pl: !N */
6c2c63d3
RH
761 cond = TCG_COND_LT;
762 value = cpu_NF;
d9ba4830 763 break;
6c2c63d3 764
d9ba4830 765 case 6: /* vs: V */
d9ba4830 766 case 7: /* vc: !V */
6c2c63d3
RH
767 cond = TCG_COND_LT;
768 value = cpu_VF;
d9ba4830 769 break;
6c2c63d3 770
d9ba4830 771 case 8: /* hi: C && !Z */
6c2c63d3
RH
772 case 9: /* ls: !C || Z -> !(C && !Z) */
773 cond = TCG_COND_NE;
774 value = tcg_temp_new_i32();
775 global = false;
776 /* CF is 1 for C, so -CF is an all-bits-set mask for C;
777 ZF is non-zero for !Z; so AND the two subexpressions. */
778 tcg_gen_neg_i32(value, cpu_CF);
779 tcg_gen_and_i32(value, value, cpu_ZF);
d9ba4830 780 break;
6c2c63d3 781
d9ba4830 782 case 10: /* ge: N == V -> N ^ V == 0 */
d9ba4830 783 case 11: /* lt: N != V -> N ^ V != 0 */
6c2c63d3
RH
784 /* Since we're only interested in the sign bit, == 0 is >= 0. */
785 cond = TCG_COND_GE;
786 value = tcg_temp_new_i32();
787 global = false;
788 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
d9ba4830 789 break;
6c2c63d3 790
d9ba4830 791 case 12: /* gt: !Z && N == V */
d9ba4830 792 case 13: /* le: Z || N != V */
6c2c63d3
RH
793 cond = TCG_COND_NE;
794 value = tcg_temp_new_i32();
795 global = false;
796 /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
797 * the sign bit then AND with ZF to yield the result. */
798 tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
799 tcg_gen_sari_i32(value, value, 31);
800 tcg_gen_andc_i32(value, cpu_ZF, value);
d9ba4830 801 break;
6c2c63d3 802
9305eac0
RH
803 case 14: /* always */
804 case 15: /* always */
805 /* Use the ALWAYS condition, which will fold early.
806 * It doesn't matter what we use for the value. */
807 cond = TCG_COND_ALWAYS;
808 value = cpu_ZF;
809 goto no_invert;
810
d9ba4830
PB
811 default:
812 fprintf(stderr, "Bad condition code 0x%x\n", cc);
813 abort();
814 }
6c2c63d3
RH
815
816 if (cc & 1) {
817 cond = tcg_invert_cond(cond);
818 }
819
9305eac0 820 no_invert:
6c2c63d3
RH
821 cmp->cond = cond;
822 cmp->value = value;
823 cmp->value_global = global;
824}
825
826void arm_free_cc(DisasCompare *cmp)
827{
828 if (!cmp->value_global) {
829 tcg_temp_free_i32(cmp->value);
830 }
831}
832
833void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
834{
835 tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
836}
837
838void arm_gen_test_cc(int cc, TCGLabel *label)
839{
840 DisasCompare cmp;
841 arm_test_cc(&cmp, cc);
842 arm_jump_cc(&cmp, label);
843 arm_free_cc(&cmp);
d9ba4830 844}
2c0262af 845
b1d8e52e 846static const uint8_t table_logic_cc[16] = {
2c0262af
FB
847 1, /* and */
848 1, /* xor */
849 0, /* sub */
850 0, /* rsb */
851 0, /* add */
852 0, /* adc */
853 0, /* sbc */
854 0, /* rsc */
855 1, /* andl */
856 1, /* xorl */
857 0, /* cmp */
858 0, /* cmn */
859 1, /* orr */
860 1, /* mov */
861 1, /* bic */
862 1, /* mvn */
863};
3b46e624 864
d9ba4830
PB
865/* Set PC and Thumb state from an immediate address. */
866static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 867{
39d5492a 868 TCGv_i32 tmp;
99c475ab 869
577bf808 870 s->is_jmp = DISAS_JUMP;
d9ba4830 871 if (s->thumb != (addr & 1)) {
7d1b0095 872 tmp = tcg_temp_new_i32();
d9ba4830 873 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 874 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 875 tcg_temp_free_i32(tmp);
d9ba4830 876 }
155c3eac 877 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
878}
879
880/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 881static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 882{
577bf808 883 s->is_jmp = DISAS_JUMP;
155c3eac
FN
884 tcg_gen_andi_i32(cpu_R[15], var, ~1);
885 tcg_gen_andi_i32(var, var, 1);
886 store_cpu_field(var, thumb);
d9ba4830
PB
887}
888
21aeb343
JR
889/* Variant of store_reg which uses branch&exchange logic when storing
890 to r15 in ARM architecture v7 and above. The source must be a temporary
891 and will be marked as dead. */
7dcc1f89 892static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
893{
894 if (reg == 15 && ENABLE_ARCH_7) {
895 gen_bx(s, var);
896 } else {
897 store_reg(s, reg, var);
898 }
899}
900
be5e7a76
DES
901/* Variant of store_reg which uses branch&exchange logic when storing
902 * to r15 in ARM architecture v5T and above. This is used for storing
903 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
904 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 905static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
906{
907 if (reg == 15 && ENABLE_ARCH_5) {
908 gen_bx(s, var);
909 } else {
910 store_reg(s, reg, var);
911 }
912}
913
e334bd31
PB
914#ifdef CONFIG_USER_ONLY
915#define IS_USER_ONLY 1
916#else
917#define IS_USER_ONLY 0
918#endif
919
08307563
PM
920/* Abstractions of "generate code to do a guest load/store for
921 * AArch32", where a vaddr is always 32 bits (and is zero
922 * extended if we're a 64 bit core) and data is also
923 * 32 bits unless specifically doing a 64 bit access.
924 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 925 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
926 */
927#if TARGET_LONG_BITS == 32
928
/* Generators for AArch32 guest loads/stores when the host TCG vaddr
 * type is 32 bits: addresses can be used directly.  BE32_XOR is the
 * address-bit flip applied for system-mode SCTLR.B big-endian
 * accesses (0 disables the fixup at compile time).
 */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */     \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_ld_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_ld_i32(val, addr, index, opc);                          \
}

#define DO_GEN_ST(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */     \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_st_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_st_i32(val, addr, index, opc);                          \
}
960
12dcc321
PB
961static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
962 TCGv_i32 addr, int index)
08307563 963{
dacf0a2f
PB
964 TCGMemOp opc = MO_Q | s->be_data;
965 tcg_gen_qemu_ld_i64(val, addr, index, opc);
e334bd31
PB
966 /* Not needed for user-mode BE32, where we use MO_BE instead. */
967 if (!IS_USER_ONLY && s->sctlr_b) {
968 tcg_gen_rotri_i64(val, val, 32);
969 }
08307563
PM
970}
971
12dcc321
PB
972static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
973 TCGv_i32 addr, int index)
08307563 974{
dacf0a2f 975 TCGMemOp opc = MO_Q | s->be_data;
e334bd31
PB
976 /* Not needed for user-mode BE32, where we use MO_BE instead. */
977 if (!IS_USER_ONLY && s->sctlr_b) {
978 TCGv_i64 tmp = tcg_temp_new_i64();
979 tcg_gen_rotri_i64(tmp, val, 32);
980 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
981 tcg_temp_free_i64(tmp);
982 return;
983 }
dacf0a2f 984 tcg_gen_qemu_st_i64(val, addr, index, opc);
08307563
PM
985}
986
987#else
988
e334bd31 989#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
12dcc321
PB
990static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
991 TCGv_i32 addr, int index) \
08307563 992{ \
dacf0a2f 993 TCGMemOp opc = (OPC) | s->be_data; \
08307563 994 TCGv addr64 = tcg_temp_new(); \
08307563 995 tcg_gen_extu_i32_i64(addr64, addr); \
e334bd31
PB
996 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
997 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
998 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
999 } \
dacf0a2f 1000 tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
08307563 1001 tcg_temp_free(addr64); \
08307563
PM
1002}
1003
e334bd31 1004#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
12dcc321
PB
1005static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
1006 TCGv_i32 addr, int index) \
08307563 1007{ \
dacf0a2f 1008 TCGMemOp opc = (OPC) | s->be_data; \
08307563 1009 TCGv addr64 = tcg_temp_new(); \
08307563 1010 tcg_gen_extu_i32_i64(addr64, addr); \
e334bd31
PB
1011 /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
1012 if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
1013 tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
1014 } \
dacf0a2f 1015 tcg_gen_qemu_st_i32(val, addr64, index, opc); \
08307563 1016 tcg_temp_free(addr64); \
08307563
PM
1017}
1018
12dcc321
PB
1019static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1020 TCGv_i32 addr, int index)
08307563 1021{
dacf0a2f 1022 TCGMemOp opc = MO_Q | s->be_data;
08307563
PM
1023 TCGv addr64 = tcg_temp_new();
1024 tcg_gen_extu_i32_i64(addr64, addr);
dacf0a2f 1025 tcg_gen_qemu_ld_i64(val, addr64, index, opc);
e334bd31
PB
1026
1027 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1028 if (!IS_USER_ONLY && s->sctlr_b) {
1029 tcg_gen_rotri_i64(val, val, 32);
1030 }
08307563
PM
1031 tcg_temp_free(addr64);
1032}
1033
12dcc321
PB
1034static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1035 TCGv_i32 addr, int index)
08307563 1036{
dacf0a2f 1037 TCGMemOp opc = MO_Q | s->be_data;
08307563
PM
1038 TCGv addr64 = tcg_temp_new();
1039 tcg_gen_extu_i32_i64(addr64, addr);
e334bd31
PB
1040
1041 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1042 if (!IS_USER_ONLY && s->sctlr_b) {
1043 TCGv tmp = tcg_temp_new();
1044 tcg_gen_rotri_i64(tmp, val, 32);
1045 tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
1046 tcg_temp_free(tmp);
1047 } else {
1048 tcg_gen_qemu_st_i64(val, addr64, index, opc);
1049 }
08307563
PM
1050 tcg_temp_free(addr64);
1051}
1052
1053#endif
1054
/* Instantiate the access-size variants.  The last argument is the BE32
 * address XOR: 3 for byte accesses, 2 for halfwords, 0 for words (which
 * need no swizzling). */
DO_GEN_LD(8s, MO_SB, 3)
DO_GEN_LD(8u, MO_UB, 3)
DO_GEN_LD(16s, MO_SW, 2)
DO_GEN_LD(16u, MO_UW, 2)
DO_GEN_LD(32u, MO_UL, 0)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
DO_GEN_ST(8, MO_UB, 3)
DO_GEN_ST(16, MO_UW, 2)
DO_GEN_ST(32, MO_UL, 0)

/* Set the emulated PC (r15) to a known immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Generate code for HVC #imm16 (hypervisor call). */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

/* Generate code for SMC (secure monitor call). */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}

/* Write the current Thumb IT (if-then) state back to CPUARMState so it
 * is correct if an exception is taken mid IT-block.  No-op when not in
 * an IT block (condexec_mask == 0). */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack cond and (already-advanced) mask into the CPSR.IT layout. */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

/* Raise a QEMU-internal exception for the instruction at (pc - offset),
 * syncing IT state and PC first; ends the current TB. */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

/* Raise an architectural exception for the instruction at (pc - offset)
 * with syndrome 'syn', targeting exception level 'target_el'; syncs IT
 * state and PC, then ends the current TB. */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Mask out the Thumb bit so r15 holds the plain instruction address. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}

b0109805 1140static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1141 TCGv_i32 var)
2c0262af 1142{
1e8d4eec 1143 int val, rm, shift, shiftop;
39d5492a 1144 TCGv_i32 offset;
2c0262af
FB
1145
1146 if (!(insn & (1 << 25))) {
1147 /* immediate */
1148 val = insn & 0xfff;
1149 if (!(insn & (1 << 23)))
1150 val = -val;
537730b9 1151 if (val != 0)
b0109805 1152 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1153 } else {
1154 /* shift/register */
1155 rm = (insn) & 0xf;
1156 shift = (insn >> 7) & 0x1f;
1e8d4eec 1157 shiftop = (insn >> 5) & 3;
b26eefb6 1158 offset = load_reg(s, rm);
9a119ff6 1159 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1160 if (!(insn & (1 << 23)))
b0109805 1161 tcg_gen_sub_i32(var, var, offset);
2c0262af 1162 else
b0109805 1163 tcg_gen_add_i32(var, var, offset);
7d1b0095 1164 tcg_temp_free_i32(offset);
2c0262af
FB
1165 }
1166}
1167
191f9a93 1168static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1169 int extra, TCGv_i32 var)
2c0262af
FB
1170{
1171 int val, rm;
39d5492a 1172 TCGv_i32 offset;
3b46e624 1173
2c0262af
FB
1174 if (insn & (1 << 22)) {
1175 /* immediate */
1176 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1177 if (!(insn & (1 << 23)))
1178 val = -val;
18acad92 1179 val += extra;
537730b9 1180 if (val != 0)
b0109805 1181 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1182 } else {
1183 /* register */
191f9a93 1184 if (extra)
b0109805 1185 tcg_gen_addi_i32(var, var, extra);
2c0262af 1186 rm = (insn) & 0xf;
b26eefb6 1187 offset = load_reg(s, rm);
2c0262af 1188 if (!(insn & (1 << 23)))
b0109805 1189 tcg_gen_sub_i32(var, var, offset);
2c0262af 1190 else
b0109805 1191 tcg_gen_add_i32(var, var, offset);
7d1b0095 1192 tcg_temp_free_i32(offset);
2c0262af
FB
1193 }
1194}
1195
5aaebd13
PM
1196static TCGv_ptr get_fpstatus_ptr(int neon)
1197{
1198 TCGv_ptr statusptr = tcg_temp_new_ptr();
1199 int offset;
1200 if (neon) {
0ecb72a5 1201 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1202 } else {
0ecb72a5 1203 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1204 }
1205 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1206 return statusptr;
1207}
1208
/* Two-operand VFP arithmetic: F0 := F0 <op> F1, single or double
 * precision, using the active VFP float_status for rounding/flags. */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

/* F0 := |F0| */
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

/* F0 := -F0 */
static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

/* F0 := sqrt(F0); helper takes cpu_env for FP status and exceptions. */
static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

/* Compare F0 with F1, setting the VFP flags via cpu_env. */
static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

/* The "E" (exception-raising) compare variant of gen_vfp_cmp. */
static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

/* F1 := +0.0 (used for compare-with-zero forms). */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

/* Integer-to-float conversion: F0 := convert(F0 as 32-bit int).
 * The source is always the single-precision view (cpu_F0s); 'neon'
 * selects the Neon standard-FPSCR float_status. */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* Float-to-integer conversion: the integer result is always a single
 * 32-bit word, so both precisions write cpu_F0s.  The plain forms round
 * per FPSCR; the "z" forms (see instantiations) truncate toward zero. */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* Fixed-point conversions taking a shift (number of fraction bits).
 * 'round' is appended to the helper name: _round_to_zero for the
 * float-to-fixed directions, empty for fixed-to-float. */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX

/* Load the F0 working register (double when dp, else single) from the
 * guest address 'addr'. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

/* Store the F0 working register to the guest address 'addr'. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}

8e96005d
FB
1375static inline long
1376vfp_reg_offset (int dp, int reg)
1377{
1378 if (dp)
1379 return offsetof(CPUARMState, vfp.regs[reg]);
1380 else if (reg & 1) {
1381 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1382 + offsetof(CPU_DoubleU, l.upper);
1383 } else {
1384 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1385 + offsetof(CPU_DoubleU, l.lower);
1386 }
1387}
9ee6e8bb
PB
1388
/* Return the offset of 32-bit piece 'n' of Neon register 'reg';
 * piece zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}

/* Read one 32-bit piece of a Neon register into a fresh temp
 * (caller is responsible for freeing it). */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Write one 32-bit piece of a Neon register; consumes (frees) var. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* Load/store a whole 64-bit Neon register; unlike neon_store_reg,
 * these do not free the caller's temp. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* VFP values are held in plain i32/i64 TCG temps; these aliases just
 * let the float-handling code below read naturally. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

/* Copy VFP register 'reg' into the F0 working register. */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy VFP register 'reg' into the F1 working register. */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy the F0 working register back into VFP register 'reg'. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Coprocessor instruction bit 20: set for register reads (MRC-style). */
#define ARM_CP_RW_BIT (1 << 20)

/* Load/store a 64-bit iwMMXt data register from/to CPUARMState. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load an iwMMXt control register into a fresh temp (caller frees). */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store to an iwMMXt control register; consumes (frees) var. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Move between the M0 working register and iwMMXt register wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* Simple 64-bit logical ops: M0 := M0 <op> wRn (cpu_V1 as scratch). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* Generate M0 := helper(M0, wRn) wrappers for two-operand iwMMXt ops. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper additionally takes cpu_env (it touches
 * state in CPUARMState, e.g. saturation/condition flags). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long element-size variants in one go. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env helpers: M0 := helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

/* Set wCon bit 1 (MUP): an iwMMXt main (data) register was updated. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set wCon bit 0 (CUP): an iwMMXt control register was updated. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Recompute the N/Z bits of wCASF from the value currently in M0. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn (accumulate step used by
 * the WSAD handling below). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

/* Compute the transfer address of an iwMMXt load/store into 'dest',
 * applying pre/post indexing and base register (Rn) writeback as
 * encoded in the instruction.  Returns nonzero for an invalid
 * addressing-mode encoding. */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, optionally scaled by 4 (insn bit 8 via >>7 & 2). */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        /* Bit 21 selects base writeback; otherwise drop the temp. */
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

/* Fetch an iwMMXt shift amount into 'dest': from control register
 * wCGR0..wCGR3 when insn bit 8 is set, otherwise from the low 32 bits
 * of data register wRd.  The value is ANDed with 'mask'.  Returns
 * nonzero for a bad encoding (non-wCGR control register). */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

a1c7273b 1671/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1672 (ie. an undefined instruction). */
7dcc1f89 1673static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1674{
1675 int rd, wrd;
1676 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1677 TCGv_i32 addr;
1678 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1679
1680 if ((insn & 0x0e000e00) == 0x0c000000) {
1681 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1682 wrd = insn & 0xf;
1683 rdlo = (insn >> 12) & 0xf;
1684 rdhi = (insn >> 16) & 0xf;
1685 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1686 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1687 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1688 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1689 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1690 } else { /* TMCRR */
da6b5335
FN
1691 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1692 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1693 gen_op_iwmmxt_set_mup();
1694 }
1695 return 0;
1696 }
1697
1698 wrd = (insn >> 12) & 0xf;
7d1b0095 1699 addr = tcg_temp_new_i32();
da6b5335 1700 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1701 tcg_temp_free_i32(addr);
18c9b560 1702 return 1;
da6b5335 1703 }
18c9b560
AZ
1704 if (insn & ARM_CP_RW_BIT) {
1705 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1706 tmp = tcg_temp_new_i32();
12dcc321 1707 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1708 iwmmxt_store_creg(wrd, tmp);
18c9b560 1709 } else {
e677137d
PB
1710 i = 1;
1711 if (insn & (1 << 8)) {
1712 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1713 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1714 i = 0;
1715 } else { /* WLDRW wRd */
29531141 1716 tmp = tcg_temp_new_i32();
12dcc321 1717 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1718 }
1719 } else {
29531141 1720 tmp = tcg_temp_new_i32();
e677137d 1721 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1722 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1723 } else { /* WLDRB */
12dcc321 1724 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1725 }
1726 }
1727 if (i) {
1728 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1729 tcg_temp_free_i32(tmp);
e677137d 1730 }
18c9b560
AZ
1731 gen_op_iwmmxt_movq_wRn_M0(wrd);
1732 }
1733 } else {
1734 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1735 tmp = iwmmxt_load_creg(wrd);
12dcc321 1736 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1737 } else {
1738 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1739 tmp = tcg_temp_new_i32();
e677137d
PB
1740 if (insn & (1 << 8)) {
1741 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1742 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1743 } else { /* WSTRW wRd */
ecc7b3aa 1744 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1745 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1746 }
1747 } else {
1748 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1749 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1750 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1751 } else { /* WSTRB */
ecc7b3aa 1752 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1753 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1754 }
1755 }
18c9b560 1756 }
29531141 1757 tcg_temp_free_i32(tmp);
18c9b560 1758 }
7d1b0095 1759 tcg_temp_free_i32(addr);
18c9b560
AZ
1760 return 0;
1761 }
1762
1763 if ((insn & 0x0f000000) != 0x0e000000)
1764 return 1;
1765
1766 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1767 case 0x000: /* WOR */
1768 wrd = (insn >> 12) & 0xf;
1769 rd0 = (insn >> 0) & 0xf;
1770 rd1 = (insn >> 16) & 0xf;
1771 gen_op_iwmmxt_movq_M0_wRn(rd0);
1772 gen_op_iwmmxt_orq_M0_wRn(rd1);
1773 gen_op_iwmmxt_setpsr_nz();
1774 gen_op_iwmmxt_movq_wRn_M0(wrd);
1775 gen_op_iwmmxt_set_mup();
1776 gen_op_iwmmxt_set_cup();
1777 break;
1778 case 0x011: /* TMCR */
1779 if (insn & 0xf)
1780 return 1;
1781 rd = (insn >> 12) & 0xf;
1782 wrd = (insn >> 16) & 0xf;
1783 switch (wrd) {
1784 case ARM_IWMMXT_wCID:
1785 case ARM_IWMMXT_wCASF:
1786 break;
1787 case ARM_IWMMXT_wCon:
1788 gen_op_iwmmxt_set_cup();
1789 /* Fall through. */
1790 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1791 tmp = iwmmxt_load_creg(wrd);
1792 tmp2 = load_reg(s, rd);
f669df27 1793 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1794 tcg_temp_free_i32(tmp2);
da6b5335 1795 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1796 break;
1797 case ARM_IWMMXT_wCGR0:
1798 case ARM_IWMMXT_wCGR1:
1799 case ARM_IWMMXT_wCGR2:
1800 case ARM_IWMMXT_wCGR3:
1801 gen_op_iwmmxt_set_cup();
da6b5335
FN
1802 tmp = load_reg(s, rd);
1803 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1804 break;
1805 default:
1806 return 1;
1807 }
1808 break;
1809 case 0x100: /* WXOR */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 0) & 0xf;
1812 rd1 = (insn >> 16) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
1814 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1815 gen_op_iwmmxt_setpsr_nz();
1816 gen_op_iwmmxt_movq_wRn_M0(wrd);
1817 gen_op_iwmmxt_set_mup();
1818 gen_op_iwmmxt_set_cup();
1819 break;
1820 case 0x111: /* TMRC */
1821 if (insn & 0xf)
1822 return 1;
1823 rd = (insn >> 12) & 0xf;
1824 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1825 tmp = iwmmxt_load_creg(wrd);
1826 store_reg(s, rd, tmp);
18c9b560
AZ
1827 break;
1828 case 0x300: /* WANDN */
1829 wrd = (insn >> 12) & 0xf;
1830 rd0 = (insn >> 0) & 0xf;
1831 rd1 = (insn >> 16) & 0xf;
1832 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1833 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1834 gen_op_iwmmxt_andq_M0_wRn(rd1);
1835 gen_op_iwmmxt_setpsr_nz();
1836 gen_op_iwmmxt_movq_wRn_M0(wrd);
1837 gen_op_iwmmxt_set_mup();
1838 gen_op_iwmmxt_set_cup();
1839 break;
1840 case 0x200: /* WAND */
1841 wrd = (insn >> 12) & 0xf;
1842 rd0 = (insn >> 0) & 0xf;
1843 rd1 = (insn >> 16) & 0xf;
1844 gen_op_iwmmxt_movq_M0_wRn(rd0);
1845 gen_op_iwmmxt_andq_M0_wRn(rd1);
1846 gen_op_iwmmxt_setpsr_nz();
1847 gen_op_iwmmxt_movq_wRn_M0(wrd);
1848 gen_op_iwmmxt_set_mup();
1849 gen_op_iwmmxt_set_cup();
1850 break;
1851 case 0x810: case 0xa10: /* WMADD */
1852 wrd = (insn >> 12) & 0xf;
1853 rd0 = (insn >> 0) & 0xf;
1854 rd1 = (insn >> 16) & 0xf;
1855 gen_op_iwmmxt_movq_M0_wRn(rd0);
1856 if (insn & (1 << 21))
1857 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1858 else
1859 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1860 gen_op_iwmmxt_movq_wRn_M0(wrd);
1861 gen_op_iwmmxt_set_mup();
1862 break;
1863 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1864 wrd = (insn >> 12) & 0xf;
1865 rd0 = (insn >> 16) & 0xf;
1866 rd1 = (insn >> 0) & 0xf;
1867 gen_op_iwmmxt_movq_M0_wRn(rd0);
1868 switch ((insn >> 22) & 3) {
1869 case 0:
1870 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1871 break;
1872 case 1:
1873 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1874 break;
1875 case 2:
1876 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1877 break;
1878 case 3:
1879 return 1;
1880 }
1881 gen_op_iwmmxt_movq_wRn_M0(wrd);
1882 gen_op_iwmmxt_set_mup();
1883 gen_op_iwmmxt_set_cup();
1884 break;
1885 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1886 wrd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
1888 rd1 = (insn >> 0) & 0xf;
1889 gen_op_iwmmxt_movq_M0_wRn(rd0);
1890 switch ((insn >> 22) & 3) {
1891 case 0:
1892 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1893 break;
1894 case 1:
1895 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1896 break;
1897 case 2:
1898 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1899 break;
1900 case 3:
1901 return 1;
1902 }
1903 gen_op_iwmmxt_movq_wRn_M0(wrd);
1904 gen_op_iwmmxt_set_mup();
1905 gen_op_iwmmxt_set_cup();
1906 break;
1907 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1908 wrd = (insn >> 12) & 0xf;
1909 rd0 = (insn >> 16) & 0xf;
1910 rd1 = (insn >> 0) & 0xf;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
1912 if (insn & (1 << 22))
1913 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1914 else
1915 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1916 if (!(insn & (1 << 20)))
1917 gen_op_iwmmxt_addl_M0_wRn(wrd);
1918 gen_op_iwmmxt_movq_wRn_M0(wrd);
1919 gen_op_iwmmxt_set_mup();
1920 break;
1921 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1922 wrd = (insn >> 12) & 0xf;
1923 rd0 = (insn >> 16) & 0xf;
1924 rd1 = (insn >> 0) & 0xf;
1925 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1926 if (insn & (1 << 21)) {
1927 if (insn & (1 << 20))
1928 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1929 else
1930 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1931 } else {
1932 if (insn & (1 << 20))
1933 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1934 else
1935 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1936 }
18c9b560
AZ
1937 gen_op_iwmmxt_movq_wRn_M0(wrd);
1938 gen_op_iwmmxt_set_mup();
1939 break;
1940 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1941 wrd = (insn >> 12) & 0xf;
1942 rd0 = (insn >> 16) & 0xf;
1943 rd1 = (insn >> 0) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
1945 if (insn & (1 << 21))
1946 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1947 else
1948 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1949 if (!(insn & (1 << 20))) {
e677137d
PB
1950 iwmmxt_load_reg(cpu_V1, wrd);
1951 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1952 }
1953 gen_op_iwmmxt_movq_wRn_M0(wrd);
1954 gen_op_iwmmxt_set_mup();
1955 break;
1956 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 16) & 0xf;
1959 rd1 = (insn >> 0) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 switch ((insn >> 22) & 3) {
1962 case 0:
1963 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1964 break;
1965 case 1:
1966 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1967 break;
1968 case 2:
1969 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1970 break;
1971 case 3:
1972 return 1;
1973 }
1974 gen_op_iwmmxt_movq_wRn_M0(wrd);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1977 break;
1978 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1979 wrd = (insn >> 12) & 0xf;
1980 rd0 = (insn >> 16) & 0xf;
1981 rd1 = (insn >> 0) & 0xf;
1982 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1983 if (insn & (1 << 22)) {
1984 if (insn & (1 << 20))
1985 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1986 else
1987 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1988 } else {
1989 if (insn & (1 << 20))
1990 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1991 else
1992 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1993 }
18c9b560
AZ
1994 gen_op_iwmmxt_movq_wRn_M0(wrd);
1995 gen_op_iwmmxt_set_mup();
1996 gen_op_iwmmxt_set_cup();
1997 break;
1998 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1999 wrd = (insn >> 12) & 0xf;
2000 rd0 = (insn >> 16) & 0xf;
2001 rd1 = (insn >> 0) & 0xf;
2002 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2003 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2004 tcg_gen_andi_i32(tmp, tmp, 7);
2005 iwmmxt_load_reg(cpu_V1, rd1);
2006 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2007 tcg_temp_free_i32(tmp);
18c9b560
AZ
2008 gen_op_iwmmxt_movq_wRn_M0(wrd);
2009 gen_op_iwmmxt_set_mup();
2010 break;
2011 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2012 if (((insn >> 6) & 3) == 3)
2013 return 1;
18c9b560
AZ
2014 rd = (insn >> 12) & 0xf;
2015 wrd = (insn >> 16) & 0xf;
da6b5335 2016 tmp = load_reg(s, rd);
18c9b560
AZ
2017 gen_op_iwmmxt_movq_M0_wRn(wrd);
2018 switch ((insn >> 6) & 3) {
2019 case 0:
da6b5335
FN
2020 tmp2 = tcg_const_i32(0xff);
2021 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2022 break;
2023 case 1:
da6b5335
FN
2024 tmp2 = tcg_const_i32(0xffff);
2025 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2026 break;
2027 case 2:
da6b5335
FN
2028 tmp2 = tcg_const_i32(0xffffffff);
2029 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2030 break;
da6b5335 2031 default:
39d5492a
PM
2032 TCGV_UNUSED_I32(tmp2);
2033 TCGV_UNUSED_I32(tmp3);
18c9b560 2034 }
da6b5335 2035 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2036 tcg_temp_free_i32(tmp3);
2037 tcg_temp_free_i32(tmp2);
7d1b0095 2038 tcg_temp_free_i32(tmp);
18c9b560
AZ
2039 gen_op_iwmmxt_movq_wRn_M0(wrd);
2040 gen_op_iwmmxt_set_mup();
2041 break;
2042 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2043 rd = (insn >> 12) & 0xf;
2044 wrd = (insn >> 16) & 0xf;
da6b5335 2045 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2046 return 1;
2047 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2048 tmp = tcg_temp_new_i32();
18c9b560
AZ
2049 switch ((insn >> 22) & 3) {
2050 case 0:
da6b5335 2051 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2052 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2053 if (insn & 8) {
2054 tcg_gen_ext8s_i32(tmp, tmp);
2055 } else {
2056 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2057 }
2058 break;
2059 case 1:
da6b5335 2060 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2061 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2062 if (insn & 8) {
2063 tcg_gen_ext16s_i32(tmp, tmp);
2064 } else {
2065 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2066 }
2067 break;
2068 case 2:
da6b5335 2069 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2070 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2071 break;
18c9b560 2072 }
da6b5335 2073 store_reg(s, rd, tmp);
18c9b560
AZ
2074 break;
2075 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2076 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2077 return 1;
da6b5335 2078 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2079 switch ((insn >> 22) & 3) {
2080 case 0:
da6b5335 2081 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2082 break;
2083 case 1:
da6b5335 2084 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2085 break;
2086 case 2:
da6b5335 2087 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2088 break;
18c9b560 2089 }
da6b5335
FN
2090 tcg_gen_shli_i32(tmp, tmp, 28);
2091 gen_set_nzcv(tmp);
7d1b0095 2092 tcg_temp_free_i32(tmp);
18c9b560
AZ
2093 break;
2094 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2095 if (((insn >> 6) & 3) == 3)
2096 return 1;
18c9b560
AZ
2097 rd = (insn >> 12) & 0xf;
2098 wrd = (insn >> 16) & 0xf;
da6b5335 2099 tmp = load_reg(s, rd);
18c9b560
AZ
2100 switch ((insn >> 6) & 3) {
2101 case 0:
da6b5335 2102 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2103 break;
2104 case 1:
da6b5335 2105 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 case 2:
da6b5335 2108 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2109 break;
18c9b560 2110 }
7d1b0095 2111 tcg_temp_free_i32(tmp);
18c9b560
AZ
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 break;
2115 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2116 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2117 return 1;
da6b5335 2118 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2119 tmp2 = tcg_temp_new_i32();
da6b5335 2120 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2121 switch ((insn >> 22) & 3) {
2122 case 0:
2123 for (i = 0; i < 7; i ++) {
da6b5335
FN
2124 tcg_gen_shli_i32(tmp2, tmp2, 4);
2125 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2126 }
2127 break;
2128 case 1:
2129 for (i = 0; i < 3; i ++) {
da6b5335
FN
2130 tcg_gen_shli_i32(tmp2, tmp2, 8);
2131 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2132 }
2133 break;
2134 case 2:
da6b5335
FN
2135 tcg_gen_shli_i32(tmp2, tmp2, 16);
2136 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2137 break;
18c9b560 2138 }
da6b5335 2139 gen_set_nzcv(tmp);
7d1b0095
PM
2140 tcg_temp_free_i32(tmp2);
2141 tcg_temp_free_i32(tmp);
18c9b560
AZ
2142 break;
2143 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2144 wrd = (insn >> 12) & 0xf;
2145 rd0 = (insn >> 16) & 0xf;
2146 gen_op_iwmmxt_movq_M0_wRn(rd0);
2147 switch ((insn >> 22) & 3) {
2148 case 0:
e677137d 2149 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2150 break;
2151 case 1:
e677137d 2152 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2153 break;
2154 case 2:
e677137d 2155 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2156 break;
2157 case 3:
2158 return 1;
2159 }
2160 gen_op_iwmmxt_movq_wRn_M0(wrd);
2161 gen_op_iwmmxt_set_mup();
2162 break;
2163 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2164 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2165 return 1;
da6b5335 2166 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2167 tmp2 = tcg_temp_new_i32();
da6b5335 2168 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2169 switch ((insn >> 22) & 3) {
2170 case 0:
2171 for (i = 0; i < 7; i ++) {
da6b5335
FN
2172 tcg_gen_shli_i32(tmp2, tmp2, 4);
2173 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2174 }
2175 break;
2176 case 1:
2177 for (i = 0; i < 3; i ++) {
da6b5335
FN
2178 tcg_gen_shli_i32(tmp2, tmp2, 8);
2179 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2180 }
2181 break;
2182 case 2:
da6b5335
FN
2183 tcg_gen_shli_i32(tmp2, tmp2, 16);
2184 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2185 break;
18c9b560 2186 }
da6b5335 2187 gen_set_nzcv(tmp);
7d1b0095
PM
2188 tcg_temp_free_i32(tmp2);
2189 tcg_temp_free_i32(tmp);
18c9b560
AZ
2190 break;
2191 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2192 rd = (insn >> 12) & 0xf;
2193 rd0 = (insn >> 16) & 0xf;
da6b5335 2194 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2195 return 1;
2196 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2197 tmp = tcg_temp_new_i32();
18c9b560
AZ
2198 switch ((insn >> 22) & 3) {
2199 case 0:
da6b5335 2200 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2201 break;
2202 case 1:
da6b5335 2203 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2204 break;
2205 case 2:
da6b5335 2206 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2207 break;
18c9b560 2208 }
da6b5335 2209 store_reg(s, rd, tmp);
18c9b560
AZ
2210 break;
2211 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2212 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2213 wrd = (insn >> 12) & 0xf;
2214 rd0 = (insn >> 16) & 0xf;
2215 rd1 = (insn >> 0) & 0xf;
2216 gen_op_iwmmxt_movq_M0_wRn(rd0);
2217 switch ((insn >> 22) & 3) {
2218 case 0:
2219 if (insn & (1 << 21))
2220 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2223 break;
2224 case 1:
2225 if (insn & (1 << 21))
2226 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2227 else
2228 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2229 break;
2230 case 2:
2231 if (insn & (1 << 21))
2232 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2233 else
2234 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2235 break;
2236 case 3:
2237 return 1;
2238 }
2239 gen_op_iwmmxt_movq_wRn_M0(wrd);
2240 gen_op_iwmmxt_set_mup();
2241 gen_op_iwmmxt_set_cup();
2242 break;
2243 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2244 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2245 wrd = (insn >> 12) & 0xf;
2246 rd0 = (insn >> 16) & 0xf;
2247 gen_op_iwmmxt_movq_M0_wRn(rd0);
2248 switch ((insn >> 22) & 3) {
2249 case 0:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_unpacklsb_M0();
2252 else
2253 gen_op_iwmmxt_unpacklub_M0();
2254 break;
2255 case 1:
2256 if (insn & (1 << 21))
2257 gen_op_iwmmxt_unpacklsw_M0();
2258 else
2259 gen_op_iwmmxt_unpackluw_M0();
2260 break;
2261 case 2:
2262 if (insn & (1 << 21))
2263 gen_op_iwmmxt_unpacklsl_M0();
2264 else
2265 gen_op_iwmmxt_unpacklul_M0();
2266 break;
2267 case 3:
2268 return 1;
2269 }
2270 gen_op_iwmmxt_movq_wRn_M0(wrd);
2271 gen_op_iwmmxt_set_mup();
2272 gen_op_iwmmxt_set_cup();
2273 break;
2274 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2275 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 gen_op_iwmmxt_movq_M0_wRn(rd0);
2279 switch ((insn >> 22) & 3) {
2280 case 0:
2281 if (insn & (1 << 21))
2282 gen_op_iwmmxt_unpackhsb_M0();
2283 else
2284 gen_op_iwmmxt_unpackhub_M0();
2285 break;
2286 case 1:
2287 if (insn & (1 << 21))
2288 gen_op_iwmmxt_unpackhsw_M0();
2289 else
2290 gen_op_iwmmxt_unpackhuw_M0();
2291 break;
2292 case 2:
2293 if (insn & (1 << 21))
2294 gen_op_iwmmxt_unpackhsl_M0();
2295 else
2296 gen_op_iwmmxt_unpackhul_M0();
2297 break;
2298 case 3:
2299 return 1;
2300 }
2301 gen_op_iwmmxt_movq_wRn_M0(wrd);
2302 gen_op_iwmmxt_set_mup();
2303 gen_op_iwmmxt_set_cup();
2304 break;
2305 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2306 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2307 if (((insn >> 22) & 3) == 0)
2308 return 1;
18c9b560
AZ
2309 wrd = (insn >> 12) & 0xf;
2310 rd0 = (insn >> 16) & 0xf;
2311 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2312 tmp = tcg_temp_new_i32();
da6b5335 2313 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2314 tcg_temp_free_i32(tmp);
18c9b560 2315 return 1;
da6b5335 2316 }
18c9b560 2317 switch ((insn >> 22) & 3) {
18c9b560 2318 case 1:
477955bd 2319 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2320 break;
2321 case 2:
477955bd 2322 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2323 break;
2324 case 3:
477955bd 2325 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2326 break;
2327 }
7d1b0095 2328 tcg_temp_free_i32(tmp);
18c9b560
AZ
2329 gen_op_iwmmxt_movq_wRn_M0(wrd);
2330 gen_op_iwmmxt_set_mup();
2331 gen_op_iwmmxt_set_cup();
2332 break;
2333 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2334 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2335 if (((insn >> 22) & 3) == 0)
2336 return 1;
18c9b560
AZ
2337 wrd = (insn >> 12) & 0xf;
2338 rd0 = (insn >> 16) & 0xf;
2339 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2340 tmp = tcg_temp_new_i32();
da6b5335 2341 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2342 tcg_temp_free_i32(tmp);
18c9b560 2343 return 1;
da6b5335 2344 }
18c9b560 2345 switch ((insn >> 22) & 3) {
18c9b560 2346 case 1:
477955bd 2347 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2348 break;
2349 case 2:
477955bd 2350 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2351 break;
2352 case 3:
477955bd 2353 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2354 break;
2355 }
7d1b0095 2356 tcg_temp_free_i32(tmp);
18c9b560
AZ
2357 gen_op_iwmmxt_movq_wRn_M0(wrd);
2358 gen_op_iwmmxt_set_mup();
2359 gen_op_iwmmxt_set_cup();
2360 break;
2361 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2362 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2363 if (((insn >> 22) & 3) == 0)
2364 return 1;
18c9b560
AZ
2365 wrd = (insn >> 12) & 0xf;
2366 rd0 = (insn >> 16) & 0xf;
2367 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2368 tmp = tcg_temp_new_i32();
da6b5335 2369 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2370 tcg_temp_free_i32(tmp);
18c9b560 2371 return 1;
da6b5335 2372 }
18c9b560 2373 switch ((insn >> 22) & 3) {
18c9b560 2374 case 1:
477955bd 2375 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2376 break;
2377 case 2:
477955bd 2378 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2379 break;
2380 case 3:
477955bd 2381 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2382 break;
2383 }
7d1b0095 2384 tcg_temp_free_i32(tmp);
18c9b560
AZ
2385 gen_op_iwmmxt_movq_wRn_M0(wrd);
2386 gen_op_iwmmxt_set_mup();
2387 gen_op_iwmmxt_set_cup();
2388 break;
2389 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2390 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2391 if (((insn >> 22) & 3) == 0)
2392 return 1;
18c9b560
AZ
2393 wrd = (insn >> 12) & 0xf;
2394 rd0 = (insn >> 16) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2396 tmp = tcg_temp_new_i32();
18c9b560 2397 switch ((insn >> 22) & 3) {
18c9b560 2398 case 1:
da6b5335 2399 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2400 tcg_temp_free_i32(tmp);
18c9b560 2401 return 1;
da6b5335 2402 }
477955bd 2403 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2404 break;
2405 case 2:
da6b5335 2406 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2407 tcg_temp_free_i32(tmp);
18c9b560 2408 return 1;
da6b5335 2409 }
477955bd 2410 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2411 break;
2412 case 3:
da6b5335 2413 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2414 tcg_temp_free_i32(tmp);
18c9b560 2415 return 1;
da6b5335 2416 }
477955bd 2417 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2418 break;
2419 }
7d1b0095 2420 tcg_temp_free_i32(tmp);
18c9b560
AZ
2421 gen_op_iwmmxt_movq_wRn_M0(wrd);
2422 gen_op_iwmmxt_set_mup();
2423 gen_op_iwmmxt_set_cup();
2424 break;
2425 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2426 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2427 wrd = (insn >> 12) & 0xf;
2428 rd0 = (insn >> 16) & 0xf;
2429 rd1 = (insn >> 0) & 0xf;
2430 gen_op_iwmmxt_movq_M0_wRn(rd0);
2431 switch ((insn >> 22) & 3) {
2432 case 0:
2433 if (insn & (1 << 21))
2434 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2435 else
2436 gen_op_iwmmxt_minub_M0_wRn(rd1);
2437 break;
2438 case 1:
2439 if (insn & (1 << 21))
2440 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2441 else
2442 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2443 break;
2444 case 2:
2445 if (insn & (1 << 21))
2446 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2447 else
2448 gen_op_iwmmxt_minul_M0_wRn(rd1);
2449 break;
2450 case 3:
2451 return 1;
2452 }
2453 gen_op_iwmmxt_movq_wRn_M0(wrd);
2454 gen_op_iwmmxt_set_mup();
2455 break;
2456 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2457 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2458 wrd = (insn >> 12) & 0xf;
2459 rd0 = (insn >> 16) & 0xf;
2460 rd1 = (insn >> 0) & 0xf;
2461 gen_op_iwmmxt_movq_M0_wRn(rd0);
2462 switch ((insn >> 22) & 3) {
2463 case 0:
2464 if (insn & (1 << 21))
2465 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2466 else
2467 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2468 break;
2469 case 1:
2470 if (insn & (1 << 21))
2471 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2472 else
2473 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2474 break;
2475 case 2:
2476 if (insn & (1 << 21))
2477 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2478 else
2479 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2480 break;
2481 case 3:
2482 return 1;
2483 }
2484 gen_op_iwmmxt_movq_wRn_M0(wrd);
2485 gen_op_iwmmxt_set_mup();
2486 break;
2487 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2488 case 0x402: case 0x502: case 0x602: case 0x702:
2489 wrd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 rd1 = (insn >> 0) & 0xf;
2492 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2493 tmp = tcg_const_i32((insn >> 20) & 3);
2494 iwmmxt_load_reg(cpu_V1, rd1);
2495 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2496 tcg_temp_free_i32(tmp);
18c9b560
AZ
2497 gen_op_iwmmxt_movq_wRn_M0(wrd);
2498 gen_op_iwmmxt_set_mup();
2499 break;
2500 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2501 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2502 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2503 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2504 wrd = (insn >> 12) & 0xf;
2505 rd0 = (insn >> 16) & 0xf;
2506 rd1 = (insn >> 0) & 0xf;
2507 gen_op_iwmmxt_movq_M0_wRn(rd0);
2508 switch ((insn >> 20) & 0xf) {
2509 case 0x0:
2510 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2511 break;
2512 case 0x1:
2513 gen_op_iwmmxt_subub_M0_wRn(rd1);
2514 break;
2515 case 0x3:
2516 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2517 break;
2518 case 0x4:
2519 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2520 break;
2521 case 0x5:
2522 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2523 break;
2524 case 0x7:
2525 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2526 break;
2527 case 0x8:
2528 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2529 break;
2530 case 0x9:
2531 gen_op_iwmmxt_subul_M0_wRn(rd1);
2532 break;
2533 case 0xb:
2534 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2535 break;
2536 default:
2537 return 1;
2538 }
2539 gen_op_iwmmxt_movq_wRn_M0(wrd);
2540 gen_op_iwmmxt_set_mup();
2541 gen_op_iwmmxt_set_cup();
2542 break;
2543 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2544 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2545 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2546 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2547 wrd = (insn >> 12) & 0xf;
2548 rd0 = (insn >> 16) & 0xf;
2549 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2550 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2551 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2552 tcg_temp_free_i32(tmp);
18c9b560
AZ
2553 gen_op_iwmmxt_movq_wRn_M0(wrd);
2554 gen_op_iwmmxt_set_mup();
2555 gen_op_iwmmxt_set_cup();
2556 break;
2557 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2558 case 0x418: case 0x518: case 0x618: case 0x718:
2559 case 0x818: case 0x918: case 0xa18: case 0xb18:
2560 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2561 wrd = (insn >> 12) & 0xf;
2562 rd0 = (insn >> 16) & 0xf;
2563 rd1 = (insn >> 0) & 0xf;
2564 gen_op_iwmmxt_movq_M0_wRn(rd0);
2565 switch ((insn >> 20) & 0xf) {
2566 case 0x0:
2567 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2568 break;
2569 case 0x1:
2570 gen_op_iwmmxt_addub_M0_wRn(rd1);
2571 break;
2572 case 0x3:
2573 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2574 break;
2575 case 0x4:
2576 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2577 break;
2578 case 0x5:
2579 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2580 break;
2581 case 0x7:
2582 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2583 break;
2584 case 0x8:
2585 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2586 break;
2587 case 0x9:
2588 gen_op_iwmmxt_addul_M0_wRn(rd1);
2589 break;
2590 case 0xb:
2591 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2592 break;
2593 default:
2594 return 1;
2595 }
2596 gen_op_iwmmxt_movq_wRn_M0(wrd);
2597 gen_op_iwmmxt_set_mup();
2598 gen_op_iwmmxt_set_cup();
2599 break;
2600 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2601 case 0x408: case 0x508: case 0x608: case 0x708:
2602 case 0x808: case 0x908: case 0xa08: case 0xb08:
2603 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2604 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2605 return 1;
18c9b560
AZ
2606 wrd = (insn >> 12) & 0xf;
2607 rd0 = (insn >> 16) & 0xf;
2608 rd1 = (insn >> 0) & 0xf;
2609 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2610 switch ((insn >> 22) & 3) {
18c9b560
AZ
2611 case 1:
2612 if (insn & (1 << 21))
2613 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2614 else
2615 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2616 break;
2617 case 2:
2618 if (insn & (1 << 21))
2619 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2620 else
2621 gen_op_iwmmxt_packul_M0_wRn(rd1);
2622 break;
2623 case 3:
2624 if (insn & (1 << 21))
2625 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2626 else
2627 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2628 break;
2629 }
2630 gen_op_iwmmxt_movq_wRn_M0(wrd);
2631 gen_op_iwmmxt_set_mup();
2632 gen_op_iwmmxt_set_cup();
2633 break;
2634 case 0x201: case 0x203: case 0x205: case 0x207:
2635 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2636 case 0x211: case 0x213: case 0x215: case 0x217:
2637 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2638 wrd = (insn >> 5) & 0xf;
2639 rd0 = (insn >> 12) & 0xf;
2640 rd1 = (insn >> 0) & 0xf;
2641 if (rd0 == 0xf || rd1 == 0xf)
2642 return 1;
2643 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2644 tmp = load_reg(s, rd0);
2645 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2646 switch ((insn >> 16) & 0xf) {
2647 case 0x0: /* TMIA */
da6b5335 2648 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2649 break;
2650 case 0x8: /* TMIAPH */
da6b5335 2651 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2652 break;
2653 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2654 if (insn & (1 << 16))
da6b5335 2655 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2656 if (insn & (1 << 17))
da6b5335
FN
2657 tcg_gen_shri_i32(tmp2, tmp2, 16);
2658 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2659 break;
2660 default:
7d1b0095
PM
2661 tcg_temp_free_i32(tmp2);
2662 tcg_temp_free_i32(tmp);
18c9b560
AZ
2663 return 1;
2664 }
7d1b0095
PM
2665 tcg_temp_free_i32(tmp2);
2666 tcg_temp_free_i32(tmp);
18c9b560
AZ
2667 gen_op_iwmmxt_movq_wRn_M0(wrd);
2668 gen_op_iwmmxt_set_mup();
2669 break;
2670 default:
2671 return 1;
2672 }
2673
2674 return 0;
2675}
2676
a1c7273b 2677/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2678 (ie. an undefined instruction). */
7dcc1f89 2679static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2680{
2681 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2682 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2683
2684 if ((insn & 0x0ff00f10) == 0x0e200010) {
2685 /* Multiply with Internal Accumulate Format */
2686 rd0 = (insn >> 12) & 0xf;
2687 rd1 = insn & 0xf;
2688 acc = (insn >> 5) & 7;
2689
2690 if (acc != 0)
2691 return 1;
2692
3a554c0f
FN
2693 tmp = load_reg(s, rd0);
2694 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2695 switch ((insn >> 16) & 0xf) {
2696 case 0x0: /* MIA */
3a554c0f 2697 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2698 break;
2699 case 0x8: /* MIAPH */
3a554c0f 2700 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2701 break;
2702 case 0xc: /* MIABB */
2703 case 0xd: /* MIABT */
2704 case 0xe: /* MIATB */
2705 case 0xf: /* MIATT */
18c9b560 2706 if (insn & (1 << 16))
3a554c0f 2707 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2708 if (insn & (1 << 17))
3a554c0f
FN
2709 tcg_gen_shri_i32(tmp2, tmp2, 16);
2710 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2711 break;
2712 default:
2713 return 1;
2714 }
7d1b0095
PM
2715 tcg_temp_free_i32(tmp2);
2716 tcg_temp_free_i32(tmp);
18c9b560
AZ
2717
2718 gen_op_iwmmxt_movq_wRn_M0(acc);
2719 return 0;
2720 }
2721
2722 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2723 /* Internal Accumulator Access Format */
2724 rdhi = (insn >> 16) & 0xf;
2725 rdlo = (insn >> 12) & 0xf;
2726 acc = insn & 7;
2727
2728 if (acc != 0)
2729 return 1;
2730
2731 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2732 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2733 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2734 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2735 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2736 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2737 } else { /* MAR */
3a554c0f
FN
2738 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2739 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2740 }
2741 return 0;
2742 }
2743
2744 return 1;
2745}
2746
/* VFP register-field decode helpers.
 *
 * VFP_REG_SHR shifts x right by n, treating a negative n as a left
 * shift by -n (so callers can pass bigbit - 1 even when bigbit is 0).
 * VFP_SREG assembles a 5-bit single-precision register number from the
 * 4-bit field at 'bigbit' and the extra bit at 'smallbit'.
 * VFP_DREG does the same for double-precision registers: with VFP3 the
 * extra bit extends the register number to 0..31; without VFP3 only
 * D0..D15 exist and a set 'smallbit' makes the enclosing decoder
 * return 1 (UNDEF) -- note the macro contains a 'return'.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D, N and M register operands.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2766
4373f3ce 2767/* Move between integer and VFP cores. */
39d5492a 2768static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2769{
39d5492a 2770 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2771 tcg_gen_mov_i32(tmp, cpu_F0s);
2772 return tmp;
2773}
2774
39d5492a 2775static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2776{
2777 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2778 tcg_temp_free_i32(tmp);
4373f3ce
PB
2779}
2780
39d5492a 2781static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2782{
39d5492a 2783 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2784 if (shift)
2785 tcg_gen_shri_i32(var, var, shift);
86831435 2786 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2787 tcg_gen_shli_i32(tmp, var, 8);
2788 tcg_gen_or_i32(var, var, tmp);
2789 tcg_gen_shli_i32(tmp, var, 16);
2790 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2791 tcg_temp_free_i32(tmp);
ad69471c
PB
2792}
2793
39d5492a 2794static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2795{
39d5492a 2796 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2797 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2798 tcg_gen_shli_i32(tmp, var, 16);
2799 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2800 tcg_temp_free_i32(tmp);
ad69471c
PB
2801}
2802
39d5492a 2803static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2804{
39d5492a 2805 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2806 tcg_gen_andi_i32(var, var, 0xffff0000);
2807 tcg_gen_shri_i32(tmp, var, 16);
2808 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2809 tcg_temp_free_i32(tmp);
ad69471c
PB
2810}
2811
39d5492a 2812static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2813{
2814 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2815 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2816 switch (size) {
2817 case 0:
12dcc321 2818 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2819 gen_neon_dup_u8(tmp, 0);
2820 break;
2821 case 1:
12dcc321 2822 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2823 gen_neon_dup_low16(tmp);
2824 break;
2825 case 2:
12dcc321 2826 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2827 break;
2828 default: /* Avoid compiler warnings. */
2829 abort();
2830 }
2831 return tmp;
2832}
2833
/* Handle the VSEL instruction: conditionally select between source
 * registers rn and rm according to the condition encoded in bits [21:20]
 * of insn (eq/vs/ge/gt), writing the result to rd.  'dp' selects
 * double precision.  Returns 0 (the instruction always decodes here).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values for the 64-bit movcond below:
         * ZF only needs a zero/non-zero test so is zero-extended; NF and
         * VF are tested via their sign, so they are sign-extended.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* First select on Z, then overwrite with frm again unless
             * N == V, composing the two-condition test.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2942
40cfacdd
WN
2943static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2944 uint32_t rm, uint32_t dp)
2945{
2946 uint32_t vmin = extract32(insn, 6, 1);
2947 TCGv_ptr fpst = get_fpstatus_ptr(0);
2948
2949 if (dp) {
2950 TCGv_i64 frn, frm, dest;
2951
2952 frn = tcg_temp_new_i64();
2953 frm = tcg_temp_new_i64();
2954 dest = tcg_temp_new_i64();
2955
2956 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2957 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2958 if (vmin) {
f71a2ae5 2959 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2960 } else {
f71a2ae5 2961 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2962 }
2963 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2964 tcg_temp_free_i64(frn);
2965 tcg_temp_free_i64(frm);
2966 tcg_temp_free_i64(dest);
2967 } else {
2968 TCGv_i32 frn, frm, dest;
2969
2970 frn = tcg_temp_new_i32();
2971 frm = tcg_temp_new_i32();
2972 dest = tcg_temp_new_i32();
2973
2974 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2975 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2976 if (vmin) {
f71a2ae5 2977 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2978 } else {
f71a2ae5 2979 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2980 }
2981 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2982 tcg_temp_free_i32(frn);
2983 tcg_temp_free_i32(frm);
2984 tcg_temp_free_i32(dest);
2985 }
2986
2987 tcg_temp_free_ptr(fpst);
2988 return 0;
2989}
2990
7655f39b
WN
2991static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2992 int rounding)
2993{
2994 TCGv_ptr fpst = get_fpstatus_ptr(0);
2995 TCGv_i32 tcg_rmode;
2996
2997 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2998 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2999
3000 if (dp) {
3001 TCGv_i64 tcg_op;
3002 TCGv_i64 tcg_res;
3003 tcg_op = tcg_temp_new_i64();
3004 tcg_res = tcg_temp_new_i64();
3005 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3006 gen_helper_rintd(tcg_res, tcg_op, fpst);
3007 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3008 tcg_temp_free_i64(tcg_op);
3009 tcg_temp_free_i64(tcg_res);
3010 } else {
3011 TCGv_i32 tcg_op;
3012 TCGv_i32 tcg_res;
3013 tcg_op = tcg_temp_new_i32();
3014 tcg_res = tcg_temp_new_i32();
3015 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3016 gen_helper_rints(tcg_res, tcg_op, fpst);
3017 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3018 tcg_temp_free_i32(tcg_op);
3019 tcg_temp_free_i32(tcg_res);
3020 }
3021
3022 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3023 tcg_temp_free_i32(tcg_rmode);
3024
3025 tcg_temp_free_ptr(fpst);
3026 return 0;
3027}
3028
c9975a83
WN
3029static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3030 int rounding)
3031{
3032 bool is_signed = extract32(insn, 7, 1);
3033 TCGv_ptr fpst = get_fpstatus_ptr(0);
3034 TCGv_i32 tcg_rmode, tcg_shift;
3035
3036 tcg_shift = tcg_const_i32(0);
3037
3038 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3039 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3040
3041 if (dp) {
3042 TCGv_i64 tcg_double, tcg_res;
3043 TCGv_i32 tcg_tmp;
3044 /* Rd is encoded as a single precision register even when the source
3045 * is double precision.
3046 */
3047 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3048 tcg_double = tcg_temp_new_i64();
3049 tcg_res = tcg_temp_new_i64();
3050 tcg_tmp = tcg_temp_new_i32();
3051 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3052 if (is_signed) {
3053 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3054 } else {
3055 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3056 }
ecc7b3aa 3057 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3058 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3059 tcg_temp_free_i32(tcg_tmp);
3060 tcg_temp_free_i64(tcg_res);
3061 tcg_temp_free_i64(tcg_double);
3062 } else {
3063 TCGv_i32 tcg_single, tcg_res;
3064 tcg_single = tcg_temp_new_i32();
3065 tcg_res = tcg_temp_new_i32();
3066 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3067 if (is_signed) {
3068 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3069 } else {
3070 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3071 }
3072 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3073 tcg_temp_free_i32(tcg_res);
3074 tcg_temp_free_i32(tcg_single);
3075 }
3076
3077 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3078 tcg_temp_free_i32(tcg_rmode);
3079
3080 tcg_temp_free_i32(tcg_shift);
3081
3082 tcg_temp_free_ptr(fpst);
3083
3084 return 0;
3085}
7655f39b
WN
3086
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the 2-bit RM field of the instruction.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY, /* RM == 0b00 */
    FPROUNDING_TIEEVEN, /* RM == 0b01 */
    FPROUNDING_POSINF,  /* RM == 0b10 */
    FPROUNDING_NEGINF,  /* RM == 0b11 */
};
3097
7dcc1f89 3098static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3099{
3100 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3101
d614a513 3102 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3103 return 1;
3104 }
3105
3106 if (dp) {
3107 VFP_DREG_D(rd, insn);
3108 VFP_DREG_N(rn, insn);
3109 VFP_DREG_M(rm, insn);
3110 } else {
3111 rd = VFP_SREG_D(insn);
3112 rn = VFP_SREG_N(insn);
3113 rm = VFP_SREG_M(insn);
3114 }
3115
3116 if ((insn & 0x0f800e50) == 0x0e000a00) {
3117 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3118 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3119 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3120 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3121 /* VRINTA, VRINTN, VRINTP, VRINTM */
3122 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3123 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3124 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3125 /* VCVTA, VCVTN, VCVTP, VCVTM */
3126 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3127 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3128 }
3129 return 1;
3130}
3131
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   Top-level layout: opcode group 0xe is either a single register
   transfer (bit 4 set) or VFP data processing (bit 4 clear); groups
   0xc/0xd are two-register transfers and load/store (multiple).
   Unconditional/T=1 encodings are forwarded to disas_vfp_v8_insn().  */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* Coprocessor number in bits [11:8]: 11 selects double precision.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                /* Transfer between a core register and one element of a
                 * D register (sizes below 32 bits need Neon).
                 */
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        /* Bit 23 selects unsigned (set) vs signed extend.  */
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        /* Replicate into every pass up to and including
                         * the selected one.
                         */
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* rd == 15: FMSTAT; only the NZCV flag
                                 * bits are transferred.
                                 */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            /* FPSCR writes can change vec_len/vec_stride,
                             * so end the TB.
                             */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop over the vector length; scalar ops break out after
             * one iteration (veclen == 0).
             */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    /* Expand the 8-bit VFP immediate (sign, exponent,
                     * fraction) into a full float value.
                     */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge the half-precision result into the low
                         * half of the destination S reg.
                         */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        /* Second set_rmode restores the saved mode.  */
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
4051
90aa39a1 4052static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4053{
90aa39a1
SF
4054#ifndef CONFIG_USER_ONLY
4055 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4056 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4057#else
4058 return true;
4059#endif
4060}
6e256c93 4061
90aa39a1
SF
4062static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4063{
4064 if (use_goto_tb(s, dest)) {
57fec1fe 4065 tcg_gen_goto_tb(n);
eaed129d 4066 gen_set_pc_im(s, dest);
90aa39a1 4067 tcg_gen_exit_tb((uintptr_t)s->tb + n);
6e256c93 4068 } else {
eaed129d 4069 gen_set_pc_im(s, dest);
57fec1fe 4070 tcg_gen_exit_tb(0);
6e256c93 4071 }
c53be334
FB
4072}
4073
8aaca4c0
FB
4074static inline void gen_jmp (DisasContext *s, uint32_t dest)
4075{
50225ad0 4076 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 4077 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4078 if (s->thumb)
d9ba4830
PB
4079 dest |= 1;
4080 gen_bx_im(s, dest);
8aaca4c0 4081 } else {
6e256c93 4082 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4083 s->is_jmp = DISAS_TB_JUMP;
4084 }
4085}
4086
/* 16x16->32 signed multiply for SMULxy-style insns: x/y select the
 * top (1) or bottom (0) halfword of t0/t1; result left in t0.
 */
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
4099
4100/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4101static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4102{
b5ff1b31
FB
4103 uint32_t mask;
4104
4105 mask = 0;
4106 if (flags & (1 << 0))
4107 mask |= 0xff;
4108 if (flags & (1 << 1))
4109 mask |= 0xff00;
4110 if (flags & (1 << 2))
4111 mask |= 0xff0000;
4112 if (flags & (1 << 3))
4113 mask |= 0xff000000;
9ee6e8bb 4114
2ae23e75 4115 /* Mask out undefined bits. */
9ee6e8bb 4116 mask &= ~CPSR_RESERVED;
d614a513 4117 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4118 mask &= ~CPSR_T;
d614a513
PM
4119 }
4120 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4121 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4122 }
4123 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4124 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4125 }
4126 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4127 mask &= ~CPSR_IT;
d614a513 4128 }
4051e12c
PM
4129 /* Mask out execution state and reserved bits. */
4130 if (!spsr) {
4131 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4132 }
b5ff1b31
FB
4133 /* Mask out privileged bits. */
4134 if (IS_USER(s))
9ee6e8bb 4135 mask &= CPSR_USER;
b5ff1b31
FB
4136 return mask;
4137}
4138
/* Returns nonzero if access to the PSR is not permitted.  Marks t0 as dead.
 * Writes the bits of t0 selected by @mask into the CPSR, or into the
 * current SPSR when @spsr is nonzero.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write of the masked SPSR bits. */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR writes may change privilege/flags: force a TB lookup. */
    gen_lookup_tb(s);
    return 0;
}
4160
2fbac54b
FN
/* Returns nonzero if access to the PSR is not permitted.
 * Immediate-operand variant of gen_set_psr().
 */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
4169
8bfd0550
PM
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4314
/* Emit code for MSR (banked): move general register @rn into the banked
 * register selected by (r, sysm).  Undefined/trapping encodings are
 * handled by msr_banked_access_decode().
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->is_jmp = DISAS_UPDATE;
}
4336
/* Emit code for MRS (banked): read the banked register selected by
 * (r, sysm) into general register @rn.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->is_jmp = DISAS_UPDATE;
}
4358
/* Generate an old-style exception return. Marks pc as dead.
 * Writes @pc to r15 and restores the CPSR from the current SPSR.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_helper_cpsr_write_eret(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    s->is_jmp = DISAS_JUMP;
}
4369
/* Generate a v6 exception return.  Marks both values as dead.
 * @cpsr is written to the CPSR (exception-return semantics) and
 * @pc becomes the new r15.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4378
9ee6e8bb
PB
/* Handle the NOP-compatible hint space (NOP/YIELD/WFE/WFI/SEV/SEVL).
 * YIELD/WFE/WFI end the TB so the main loop can act on the hint.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV and SEVL (currently NOPs); may help SMP
         * performance.
         */
    default: /* nop */
        break;
    }
}
99c475ab 4401
ad69471c 4402#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4403
39d5492a 4404static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4405{
4406 switch (size) {
dd8fbd78
FN
4407 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4408 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4409 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4410 default: abort();
9ee6e8bb 4411 }
9ee6e8bb
PB
4412}
4413
39d5492a 4414static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4415{
4416 switch (size) {
dd8fbd78
FN
4417 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4418 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4419 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4420 default: return;
4421 }
4422}
4423
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Dispatch a Neon integer helper that needs cpu_env, selected by the
 * local variables 'size' (element size) and 'u' (unsignedness);
 * operands/result in 'tmp'/'tmp2'.  Returns 1 from the enclosing
 * function for an invalid size/u combination.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env. */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4475
/* Load 32 bits from Neon scratch slot @scratch into a fresh temporary;
 * caller owns (and must free) the returned TCGv.
 */
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
4482
/* Store @var into Neon scratch slot @scratch.  Marks @var as dead. */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4488
/* Load a scalar operand for by-scalar Neon ops and replicate it across
 * a 32-bit value.  For 16-bit scalars (size == 1) bit 3 of @reg selects
 * the high or low halfword, which is then duplicated into both lanes.
 * Caller owns (and must free) the returned TCGv.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
4504
/* VUZP: de-interleave registers Dd/Dm (or quad pair when @q).
 * Returns nonzero for the UNDEF case (doubleword with size == 2).
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* Helpers take the register numbers, not the values. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4543
/* VZIP: interleave registers Dd/Dm (or quad pair when @q).
 * Returns nonzero for the UNDEF case (doubleword with size == 2).
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* Helpers take the register numbers, not the values. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4582
/* VTRN.8 on a 32-bit chunk: exchange the odd byte lanes of t0 with the
 * even byte lanes of t1 (t0 gets its even bytes + t1's even bytes
 * shifted up; t1 gets its odd bytes + t0's odd bytes shifted down).
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4604
/* VTRN.16 on a 32-bit chunk: exchange the high halfword of t0 with the
 * low halfword of t1.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4623
4624
9ee6e8bb
PB
/* Decode table for NEON "load/store multiple structures" (VLD1-VLD4 /
 * VST1-VST4), indexed by bits [11:8] of the instruction: number of
 * registers transferred, element interleave factor, and register
 * spacing.  Encodings above 10 are UNDEF.  The table is never written,
 * so declare it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4642
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  Covers three encoding groups:
     - load/store multiple structures (bit 23 clear)
     - load single element to all lanes (bit 23 set, size field == 3)
     - load/store single element to one lane (bit 23 set otherwise)
   and finally performs the optional base-register writeback. */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base address with a
             * per-register offset rather than running sequentially.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one access per D register. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: two 32-bit passes per D register,
                 * assembling/splitting sub-word elements as needed.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        if (load) {
                            /* Assemble a 32-bit word from four bytes. */
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            /* Split a 32-bit word into four byte stores. */
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into its lane. */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 none, rm == 13 post-increment by
     * the transfer size, otherwise post-increment by register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4971
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.
 * Implemented as dest = (t & c) | (f & ~c), bit by bit.
 */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4979
39d5492a 4980static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4981{
4982 switch (size) {
4983 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4984 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 4985 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
4986 default: abort();
4987 }
4988}
4989
/* Signed saturating narrow (VQMOVN.S): size selects destination width. */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
4999
/* Unsigned saturating narrow (VQMOVN.U): size selects destination width. */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
5009
/* Signed-to-unsigned saturating narrow (VQMOVUN): size selects width. */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
5019
/* Shift step used by the narrowing-shift insns: @q selects rounding,
 * @u selects unsigned, and only 16/32-bit element sizes occur here.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
5053
39d5492a 5054static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5055{
5056 if (u) {
5057 switch (size) {
5058 case 0: gen_helper_neon_widen_u8(dest, src); break;
5059 case 1: gen_helper_neon_widen_u16(dest, src); break;
5060 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5061 default: abort();
5062 }
5063 } else {
5064 switch (size) {
5065 case 0: gen_helper_neon_widen_s8(dest, src); break;
5066 case 1: gen_helper_neon_widen_s16(dest, src); break;
5067 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5068 default: abort();
5069 }
5070 }
7d1b0095 5071 tcg_temp_free_i32(src);
ad69471c
PB
5072}
5073
5074static inline void gen_neon_addl(int size)
5075{
5076 switch (size) {
5077 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5078 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5079 case 2: tcg_gen_add_i64(CPU_V001); break;
5080 default: abort();
5081 }
5082}
5083
5084static inline void gen_neon_subl(int size)
5085{
5086 switch (size) {
5087 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5088 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5089 case 2: tcg_gen_sub_i64(CPU_V001); break;
5090 default: abort();
5091 }
5092}
5093
a7812ae4 5094static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5095{
5096 switch (size) {
5097 case 0: gen_helper_neon_negl_u16(var, var); break;
5098 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5099 case 2:
5100 tcg_gen_neg_i64(var, var);
5101 break;
ad69471c
PB
5102 default: abort();
5103 }
5104}
5105
/* Saturating long add: op0 = sat(op0 + op1); only 32- and 64-bit
 * element sizes exist for this operation.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
5114
39d5492a
PM
5115static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5116 int size, int u)
ad69471c 5117{
a7812ae4 5118 TCGv_i64 tmp;
ad69471c
PB
5119
5120 switch ((size << 1) | u) {
5121 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5122 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5123 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5124 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5125 case 4:
5126 tmp = gen_muls_i64_i32(a, b);
5127 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5128 tcg_temp_free_i64(tmp);
ad69471c
PB
5129 break;
5130 case 5:
5131 tmp = gen_mulu_i64_i32(a, b);
5132 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5133 tcg_temp_free_i64(tmp);
ad69471c
PB
5134 break;
5135 default: abort();
5136 }
c6067f04
CL
5137
5138 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5139 Don't forget to clean them now. */
5140 if (size < 2) {
7d1b0095
PM
5141 tcg_temp_free_i32(a);
5142 tcg_temp_free_i32(b);
c6067f04 5143 }
ad69471c
PB
5144}
5145
39d5492a
PM
5146static void gen_neon_narrow_op(int op, int u, int size,
5147 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5148{
5149 if (op) {
5150 if (u) {
5151 gen_neon_unarrow_sats(size, dest, src);
5152 } else {
5153 gen_neon_narrow(size, dest, src);
5154 }
5155 } else {
5156 if (u) {
5157 gen_neon_narrow_satu(size, dest, src);
5158 } else {
5159 gen_neon_narrow_sats(size, dest, src);
5160 }
5161 }
5162}
5163
62698be3
PM
5164/* Symbolic constants for op fields for Neon 3-register same-length.
5165 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5166 * table A7-9.
5167 */
5168#define NEON_3R_VHADD 0
5169#define NEON_3R_VQADD 1
5170#define NEON_3R_VRHADD 2
5171#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5172#define NEON_3R_VHSUB 4
5173#define NEON_3R_VQSUB 5
5174#define NEON_3R_VCGT 6
5175#define NEON_3R_VCGE 7
5176#define NEON_3R_VSHL 8
5177#define NEON_3R_VQSHL 9
5178#define NEON_3R_VRSHL 10
5179#define NEON_3R_VQRSHL 11
5180#define NEON_3R_VMAX 12
5181#define NEON_3R_VMIN 13
5182#define NEON_3R_VABD 14
5183#define NEON_3R_VABA 15
5184#define NEON_3R_VADD_VSUB 16
5185#define NEON_3R_VTST_VCEQ 17
5186#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5187#define NEON_3R_VMUL 19
5188#define NEON_3R_VPMAX 20
5189#define NEON_3R_VPMIN 21
5190#define NEON_3R_VQDMULH_VQRDMULH 22
5191#define NEON_3R_VPADD 23
f1ecb913 5192#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
da97f52c 5193#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
5194#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5195#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5196#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5197#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5198#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
505935fc 5199#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
5200
5201static const uint8_t neon_3r_sizes[] = {
5202 [NEON_3R_VHADD] = 0x7,
5203 [NEON_3R_VQADD] = 0xf,
5204 [NEON_3R_VRHADD] = 0x7,
5205 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5206 [NEON_3R_VHSUB] = 0x7,
5207 [NEON_3R_VQSUB] = 0xf,
5208 [NEON_3R_VCGT] = 0x7,
5209 [NEON_3R_VCGE] = 0x7,
5210 [NEON_3R_VSHL] = 0xf,
5211 [NEON_3R_VQSHL] = 0xf,
5212 [NEON_3R_VRSHL] = 0xf,
5213 [NEON_3R_VQRSHL] = 0xf,
5214 [NEON_3R_VMAX] = 0x7,
5215 [NEON_3R_VMIN] = 0x7,
5216 [NEON_3R_VABD] = 0x7,
5217 [NEON_3R_VABA] = 0x7,
5218 [NEON_3R_VADD_VSUB] = 0xf,
5219 [NEON_3R_VTST_VCEQ] = 0x7,
5220 [NEON_3R_VML] = 0x7,
5221 [NEON_3R_VMUL] = 0x7,
5222 [NEON_3R_VPMAX] = 0x7,
5223 [NEON_3R_VPMIN] = 0x7,
5224 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5225 [NEON_3R_VPADD] = 0x7,
f1ecb913 5226 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
da97f52c 5227 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
5228 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5229 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5230 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5231 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5232 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
505935fc 5233 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
62698be3
PM
5234};
5235
600b828c
PM
5236/* Symbolic constants for op fields for Neon 2-register miscellaneous.
5237 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5238 * table A7-13.
5239 */
5240#define NEON_2RM_VREV64 0
5241#define NEON_2RM_VREV32 1
5242#define NEON_2RM_VREV16 2
5243#define NEON_2RM_VPADDL 4
5244#define NEON_2RM_VPADDL_U 5
9d935509
AB
5245#define NEON_2RM_AESE 6 /* Includes AESD */
5246#define NEON_2RM_AESMC 7 /* Includes AESIMC */
600b828c
PM
5247#define NEON_2RM_VCLS 8
5248#define NEON_2RM_VCLZ 9
5249#define NEON_2RM_VCNT 10
5250#define NEON_2RM_VMVN 11
5251#define NEON_2RM_VPADAL 12
5252#define NEON_2RM_VPADAL_U 13
5253#define NEON_2RM_VQABS 14
5254#define NEON_2RM_VQNEG 15
5255#define NEON_2RM_VCGT0 16
5256#define NEON_2RM_VCGE0 17
5257#define NEON_2RM_VCEQ0 18
5258#define NEON_2RM_VCLE0 19
5259#define NEON_2RM_VCLT0 20
f1ecb913 5260#define NEON_2RM_SHA1H 21
600b828c
PM
5261#define NEON_2RM_VABS 22
5262#define NEON_2RM_VNEG 23
5263#define NEON_2RM_VCGT0_F 24
5264#define NEON_2RM_VCGE0_F 25
5265#define NEON_2RM_VCEQ0_F 26
5266#define NEON_2RM_VCLE0_F 27
5267#define NEON_2RM_VCLT0_F 28
5268#define NEON_2RM_VABS_F 30
5269#define NEON_2RM_VNEG_F 31
5270#define NEON_2RM_VSWP 32
5271#define NEON_2RM_VTRN 33
5272#define NEON_2RM_VUZP 34
5273#define NEON_2RM_VZIP 35
5274#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5275#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5276#define NEON_2RM_VSHLL 38
f1ecb913 5277#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
34f7b0a2 5278#define NEON_2RM_VRINTN 40
2ce70625 5279#define NEON_2RM_VRINTX 41
34f7b0a2
WN
5280#define NEON_2RM_VRINTA 42
5281#define NEON_2RM_VRINTZ 43
600b828c 5282#define NEON_2RM_VCVT_F16_F32 44
34f7b0a2 5283#define NEON_2RM_VRINTM 45
600b828c 5284#define NEON_2RM_VCVT_F32_F16 46
34f7b0a2 5285#define NEON_2RM_VRINTP 47
901ad525
WN
5286#define NEON_2RM_VCVTAU 48
5287#define NEON_2RM_VCVTAS 49
5288#define NEON_2RM_VCVTNU 50
5289#define NEON_2RM_VCVTNS 51
5290#define NEON_2RM_VCVTPU 52
5291#define NEON_2RM_VCVTPS 53
5292#define NEON_2RM_VCVTMU 54
5293#define NEON_2RM_VCVTMS 55
600b828c
PM
5294#define NEON_2RM_VRECPE 56
5295#define NEON_2RM_VRSQRTE 57
5296#define NEON_2RM_VRECPE_F 58
5297#define NEON_2RM_VRSQRTE_F 59
5298#define NEON_2RM_VCVT_FS 60
5299#define NEON_2RM_VCVT_FU 61
5300#define NEON_2RM_VCVT_SF 62
5301#define NEON_2RM_VCVT_UF 63
5302
5303static int neon_2rm_is_float_op(int op)
5304{
5305 /* Return true if this neon 2reg-misc op is float-to-float */
5306 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5307 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5308 op == NEON_2RM_VRINTM ||
5309 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5310 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5311}
5312
5313/* Each entry in this array has bit n set if the insn allows
5314 * size value n (otherwise it will UNDEF). Since unallocated
5315 * op values will have no bits set they always UNDEF.
5316 */
5317static const uint8_t neon_2rm_sizes[] = {
5318 [NEON_2RM_VREV64] = 0x7,
5319 [NEON_2RM_VREV32] = 0x3,
5320 [NEON_2RM_VREV16] = 0x1,
5321 [NEON_2RM_VPADDL] = 0x7,
5322 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
5323 [NEON_2RM_AESE] = 0x1,
5324 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
5325 [NEON_2RM_VCLS] = 0x7,
5326 [NEON_2RM_VCLZ] = 0x7,
5327 [NEON_2RM_VCNT] = 0x1,
5328 [NEON_2RM_VMVN] = 0x1,
5329 [NEON_2RM_VPADAL] = 0x7,
5330 [NEON_2RM_VPADAL_U] = 0x7,
5331 [NEON_2RM_VQABS] = 0x7,
5332 [NEON_2RM_VQNEG] = 0x7,
5333 [NEON_2RM_VCGT0] = 0x7,
5334 [NEON_2RM_VCGE0] = 0x7,
5335 [NEON_2RM_VCEQ0] = 0x7,
5336 [NEON_2RM_VCLE0] = 0x7,
5337 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 5338 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
5339 [NEON_2RM_VABS] = 0x7,
5340 [NEON_2RM_VNEG] = 0x7,
5341 [NEON_2RM_VCGT0_F] = 0x4,
5342 [NEON_2RM_VCGE0_F] = 0x4,
5343 [NEON_2RM_VCEQ0_F] = 0x4,
5344 [NEON_2RM_VCLE0_F] = 0x4,
5345 [NEON_2RM_VCLT0_F] = 0x4,
5346 [NEON_2RM_VABS_F] = 0x4,
5347 [NEON_2RM_VNEG_F] = 0x4,
5348 [NEON_2RM_VSWP] = 0x1,
5349 [NEON_2RM_VTRN] = 0x7,
5350 [NEON_2RM_VUZP] = 0x7,
5351 [NEON_2RM_VZIP] = 0x7,
5352 [NEON_2RM_VMOVN] = 0x7,
5353 [NEON_2RM_VQMOVN] = 0x7,
5354 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 5355 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 5356 [NEON_2RM_VRINTN] = 0x4,
2ce70625 5357 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
5358 [NEON_2RM_VRINTA] = 0x4,
5359 [NEON_2RM_VRINTZ] = 0x4,
600b828c 5360 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 5361 [NEON_2RM_VRINTM] = 0x4,
600b828c 5362 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 5363 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
5364 [NEON_2RM_VCVTAU] = 0x4,
5365 [NEON_2RM_VCVTAS] = 0x4,
5366 [NEON_2RM_VCVTNU] = 0x4,
5367 [NEON_2RM_VCVTNS] = 0x4,
5368 [NEON_2RM_VCVTPU] = 0x4,
5369 [NEON_2RM_VCVTPS] = 0x4,
5370 [NEON_2RM_VCVTMU] = 0x4,
5371 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
5372 [NEON_2RM_VRECPE] = 0x4,
5373 [NEON_2RM_VRSQRTE] = 0x4,
5374 [NEON_2RM_VRECPE_F] = 0x4,
5375 [NEON_2RM_VRSQRTE_F] = 0x4,
5376 [NEON_2RM_VCVT_FS] = 0x4,
5377 [NEON_2RM_VCVT_FU] = 0x4,
5378 [NEON_2RM_VCVT_SF] = 0x4,
5379 [NEON_2RM_VCVT_UF] = 0x4,
5380};
5381
9ee6e8bb
PB
5382/* Translate a NEON data processing instruction. Return nonzero if the
5383 instruction is invalid.
ad69471c
PB
5384 We process data in a mixture of 32-bit and 64-bit chunks.
5385 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5386
7dcc1f89 5387static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5388{
5389 int op;
5390 int q;
5391 int rd, rn, rm;
5392 int size;
5393 int shift;
5394 int pass;
5395 int count;
5396 int pairwise;
5397 int u;
ca9a32e4 5398 uint32_t imm, mask;
39d5492a 5399 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5400 TCGv_i64 tmp64;
9ee6e8bb 5401
2c7ffc41
PM
5402 /* FIXME: this access check should not take precedence over UNDEF
5403 * for invalid encodings; we will generate incorrect syndrome information
5404 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5405 */
9dbbc748 5406 if (s->fp_excp_el) {
2c7ffc41 5407 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5408 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5409 return 0;
5410 }
5411
5df8bac1 5412 if (!s->vfp_enabled)
9ee6e8bb
PB
5413 return 1;
5414 q = (insn & (1 << 6)) != 0;
5415 u = (insn >> 24) & 1;
5416 VFP_DREG_D(rd, insn);
5417 VFP_DREG_N(rn, insn);
5418 VFP_DREG_M(rm, insn);
5419 size = (insn >> 20) & 3;
5420 if ((insn & (1 << 23)) == 0) {
5421 /* Three register same length. */
5422 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5423 /* Catch invalid op and bad size combinations: UNDEF */
5424 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5425 return 1;
5426 }
25f84f79
PM
5427 /* All insns of this form UNDEF for either this condition or the
5428 * superset of cases "Q==1"; we catch the latter later.
5429 */
5430 if (q && ((rd | rn | rm) & 1)) {
5431 return 1;
5432 }
f1ecb913
AB
5433 /*
5434 * The SHA-1/SHA-256 3-register instructions require special treatment
5435 * here, as their size field is overloaded as an op type selector, and
5436 * they all consume their input in a single pass.
5437 */
5438 if (op == NEON_3R_SHA) {
5439 if (!q) {
5440 return 1;
5441 }
5442 if (!u) { /* SHA-1 */
d614a513 5443 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5444 return 1;
5445 }
5446 tmp = tcg_const_i32(rd);
5447 tmp2 = tcg_const_i32(rn);
5448 tmp3 = tcg_const_i32(rm);
5449 tmp4 = tcg_const_i32(size);
5450 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5451 tcg_temp_free_i32(tmp4);
5452 } else { /* SHA-256 */
d614a513 5453 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5454 return 1;
5455 }
5456 tmp = tcg_const_i32(rd);
5457 tmp2 = tcg_const_i32(rn);
5458 tmp3 = tcg_const_i32(rm);
5459 switch (size) {
5460 case 0:
5461 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5462 break;
5463 case 1:
5464 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5465 break;
5466 case 2:
5467 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5468 break;
5469 }
5470 }
5471 tcg_temp_free_i32(tmp);
5472 tcg_temp_free_i32(tmp2);
5473 tcg_temp_free_i32(tmp3);
5474 return 0;
5475 }
62698be3
PM
5476 if (size == 3 && op != NEON_3R_LOGIC) {
5477 /* 64-bit element instructions. */
9ee6e8bb 5478 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5479 neon_load_reg64(cpu_V0, rn + pass);
5480 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5481 switch (op) {
62698be3 5482 case NEON_3R_VQADD:
9ee6e8bb 5483 if (u) {
02da0b2d
PM
5484 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5485 cpu_V0, cpu_V1);
2c0262af 5486 } else {
02da0b2d
PM
5487 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5488 cpu_V0, cpu_V1);
2c0262af 5489 }
9ee6e8bb 5490 break;
62698be3 5491 case NEON_3R_VQSUB:
9ee6e8bb 5492 if (u) {
02da0b2d
PM
5493 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5494 cpu_V0, cpu_V1);
ad69471c 5495 } else {
02da0b2d
PM
5496 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5497 cpu_V0, cpu_V1);
ad69471c
PB
5498 }
5499 break;
62698be3 5500 case NEON_3R_VSHL:
ad69471c
PB
5501 if (u) {
5502 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5503 } else {
5504 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5505 }
5506 break;
62698be3 5507 case NEON_3R_VQSHL:
ad69471c 5508 if (u) {
02da0b2d
PM
5509 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5510 cpu_V1, cpu_V0);
ad69471c 5511 } else {
02da0b2d
PM
5512 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5513 cpu_V1, cpu_V0);
ad69471c
PB
5514 }
5515 break;
62698be3 5516 case NEON_3R_VRSHL:
ad69471c
PB
5517 if (u) {
5518 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5519 } else {
ad69471c
PB
5520 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5521 }
5522 break;
62698be3 5523 case NEON_3R_VQRSHL:
ad69471c 5524 if (u) {
02da0b2d
PM
5525 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5526 cpu_V1, cpu_V0);
ad69471c 5527 } else {
02da0b2d
PM
5528 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5529 cpu_V1, cpu_V0);
1e8d4eec 5530 }
9ee6e8bb 5531 break;
62698be3 5532 case NEON_3R_VADD_VSUB:
9ee6e8bb 5533 if (u) {
ad69471c 5534 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5535 } else {
ad69471c 5536 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5537 }
5538 break;
5539 default:
5540 abort();
2c0262af 5541 }
ad69471c 5542 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5543 }
9ee6e8bb 5544 return 0;
2c0262af 5545 }
25f84f79 5546 pairwise = 0;
9ee6e8bb 5547 switch (op) {
62698be3
PM
5548 case NEON_3R_VSHL:
5549 case NEON_3R_VQSHL:
5550 case NEON_3R_VRSHL:
5551 case NEON_3R_VQRSHL:
9ee6e8bb 5552 {
ad69471c
PB
5553 int rtmp;
5554 /* Shift instruction operands are reversed. */
5555 rtmp = rn;
9ee6e8bb 5556 rn = rm;
ad69471c 5557 rm = rtmp;
9ee6e8bb 5558 }
2c0262af 5559 break;
25f84f79
PM
5560 case NEON_3R_VPADD:
5561 if (u) {
5562 return 1;
5563 }
5564 /* Fall through */
62698be3
PM
5565 case NEON_3R_VPMAX:
5566 case NEON_3R_VPMIN:
9ee6e8bb 5567 pairwise = 1;
2c0262af 5568 break;
25f84f79
PM
5569 case NEON_3R_FLOAT_ARITH:
5570 pairwise = (u && size < 2); /* if VPADD (float) */
5571 break;
5572 case NEON_3R_FLOAT_MINMAX:
5573 pairwise = u; /* if VPMIN/VPMAX (float) */
5574 break;
5575 case NEON_3R_FLOAT_CMP:
5576 if (!u && size) {
5577 /* no encoding for U=0 C=1x */
5578 return 1;
5579 }
5580 break;
5581 case NEON_3R_FLOAT_ACMP:
5582 if (!u) {
5583 return 1;
5584 }
5585 break;
505935fc
WN
5586 case NEON_3R_FLOAT_MISC:
5587 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5588 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5589 return 1;
5590 }
2c0262af 5591 break;
25f84f79
PM
5592 case NEON_3R_VMUL:
5593 if (u && (size != 0)) {
5594 /* UNDEF on invalid size for polynomial subcase */
5595 return 1;
5596 }
2c0262af 5597 break;
da97f52c 5598 case NEON_3R_VFM:
d614a513 5599 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5600 return 1;
5601 }
5602 break;
9ee6e8bb 5603 default:
2c0262af 5604 break;
9ee6e8bb 5605 }
dd8fbd78 5606
25f84f79
PM
5607 if (pairwise && q) {
5608 /* All the pairwise insns UNDEF if Q is set */
5609 return 1;
5610 }
5611
9ee6e8bb
PB
5612 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5613
5614 if (pairwise) {
5615 /* Pairwise. */
a5a14945
JR
5616 if (pass < 1) {
5617 tmp = neon_load_reg(rn, 0);
5618 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5619 } else {
a5a14945
JR
5620 tmp = neon_load_reg(rm, 0);
5621 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5622 }
5623 } else {
5624 /* Elementwise. */
dd8fbd78
FN
5625 tmp = neon_load_reg(rn, pass);
5626 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5627 }
5628 switch (op) {
62698be3 5629 case NEON_3R_VHADD:
9ee6e8bb
PB
5630 GEN_NEON_INTEGER_OP(hadd);
5631 break;
62698be3 5632 case NEON_3R_VQADD:
02da0b2d 5633 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5634 break;
62698be3 5635 case NEON_3R_VRHADD:
9ee6e8bb 5636 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5637 break;
62698be3 5638 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5639 switch ((u << 2) | size) {
5640 case 0: /* VAND */
dd8fbd78 5641 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5642 break;
5643 case 1: /* BIC */
f669df27 5644 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5645 break;
5646 case 2: /* VORR */
dd8fbd78 5647 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5648 break;
5649 case 3: /* VORN */
f669df27 5650 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5651 break;
5652 case 4: /* VEOR */
dd8fbd78 5653 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5654 break;
5655 case 5: /* VBSL */
dd8fbd78
FN
5656 tmp3 = neon_load_reg(rd, pass);
5657 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5658 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5659 break;
5660 case 6: /* VBIT */
dd8fbd78
FN
5661 tmp3 = neon_load_reg(rd, pass);
5662 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5663 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5664 break;
5665 case 7: /* VBIF */
dd8fbd78
FN
5666 tmp3 = neon_load_reg(rd, pass);
5667 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5668 tcg_temp_free_i32(tmp3);
9ee6e8bb 5669 break;
2c0262af
FB
5670 }
5671 break;
62698be3 5672 case NEON_3R_VHSUB:
9ee6e8bb
PB
5673 GEN_NEON_INTEGER_OP(hsub);
5674 break;
62698be3 5675 case NEON_3R_VQSUB:
02da0b2d 5676 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5677 break;
62698be3 5678 case NEON_3R_VCGT:
9ee6e8bb
PB
5679 GEN_NEON_INTEGER_OP(cgt);
5680 break;
62698be3 5681 case NEON_3R_VCGE:
9ee6e8bb
PB
5682 GEN_NEON_INTEGER_OP(cge);
5683 break;
62698be3 5684 case NEON_3R_VSHL:
ad69471c 5685 GEN_NEON_INTEGER_OP(shl);
2c0262af 5686 break;
62698be3 5687 case NEON_3R_VQSHL:
02da0b2d 5688 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5689 break;
62698be3 5690 case NEON_3R_VRSHL:
ad69471c 5691 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5692 break;
62698be3 5693 case NEON_3R_VQRSHL:
02da0b2d 5694 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5695 break;
62698be3 5696 case NEON_3R_VMAX:
9ee6e8bb
PB
5697 GEN_NEON_INTEGER_OP(max);
5698 break;
62698be3 5699 case NEON_3R_VMIN:
9ee6e8bb
PB
5700 GEN_NEON_INTEGER_OP(min);
5701 break;
62698be3 5702 case NEON_3R_VABD:
9ee6e8bb
PB
5703 GEN_NEON_INTEGER_OP(abd);
5704 break;
62698be3 5705 case NEON_3R_VABA:
9ee6e8bb 5706 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5707 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5708 tmp2 = neon_load_reg(rd, pass);
5709 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5710 break;
62698be3 5711 case NEON_3R_VADD_VSUB:
9ee6e8bb 5712 if (!u) { /* VADD */
62698be3 5713 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5714 } else { /* VSUB */
5715 switch (size) {
dd8fbd78
FN
5716 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5717 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5718 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5719 default: abort();
9ee6e8bb
PB
5720 }
5721 }
5722 break;
62698be3 5723 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5724 if (!u) { /* VTST */
5725 switch (size) {
dd8fbd78
FN
5726 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5727 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5728 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5729 default: abort();
9ee6e8bb
PB
5730 }
5731 } else { /* VCEQ */
5732 switch (size) {
dd8fbd78
FN
5733 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5734 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5735 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5736 default: abort();
9ee6e8bb
PB
5737 }
5738 }
5739 break;
62698be3 5740 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5741 switch (size) {
dd8fbd78
FN
5742 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5743 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5744 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5745 default: abort();
9ee6e8bb 5746 }
7d1b0095 5747 tcg_temp_free_i32(tmp2);
dd8fbd78 5748 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5749 if (u) { /* VMLS */
dd8fbd78 5750 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5751 } else { /* VMLA */
dd8fbd78 5752 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5753 }
5754 break;
62698be3 5755 case NEON_3R_VMUL:
9ee6e8bb 5756 if (u) { /* polynomial */
dd8fbd78 5757 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5758 } else { /* Integer */
5759 switch (size) {
dd8fbd78
FN
5760 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5761 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5762 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5763 default: abort();
9ee6e8bb
PB
5764 }
5765 }
5766 break;
62698be3 5767 case NEON_3R_VPMAX:
9ee6e8bb
PB
5768 GEN_NEON_INTEGER_OP(pmax);
5769 break;
62698be3 5770 case NEON_3R_VPMIN:
9ee6e8bb
PB
5771 GEN_NEON_INTEGER_OP(pmin);
5772 break;
62698be3 5773 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5774 if (!u) { /* VQDMULH */
5775 switch (size) {
02da0b2d
PM
5776 case 1:
5777 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5778 break;
5779 case 2:
5780 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5781 break;
62698be3 5782 default: abort();
9ee6e8bb 5783 }
62698be3 5784 } else { /* VQRDMULH */
9ee6e8bb 5785 switch (size) {
02da0b2d
PM
5786 case 1:
5787 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5788 break;
5789 case 2:
5790 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5791 break;
62698be3 5792 default: abort();
9ee6e8bb
PB
5793 }
5794 }
5795 break;
62698be3 5796 case NEON_3R_VPADD:
9ee6e8bb 5797 switch (size) {
dd8fbd78
FN
5798 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5799 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5800 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5801 default: abort();
9ee6e8bb
PB
5802 }
5803 break;
62698be3 5804 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5805 {
5806 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5807 switch ((u << 2) | size) {
5808 case 0: /* VADD */
aa47cfdd
PM
5809 case 4: /* VPADD */
5810 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5811 break;
5812 case 2: /* VSUB */
aa47cfdd 5813 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5814 break;
5815 case 6: /* VABD */
aa47cfdd 5816 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5817 break;
5818 default:
62698be3 5819 abort();
9ee6e8bb 5820 }
aa47cfdd 5821 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5822 break;
aa47cfdd 5823 }
62698be3 5824 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5825 {
5826 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5827 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5828 if (!u) {
7d1b0095 5829 tcg_temp_free_i32(tmp2);
dd8fbd78 5830 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5831 if (size == 0) {
aa47cfdd 5832 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5833 } else {
aa47cfdd 5834 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5835 }
5836 }
aa47cfdd 5837 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5838 break;
aa47cfdd 5839 }
62698be3 5840 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5841 {
5842 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5843 if (!u) {
aa47cfdd 5844 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5845 } else {
aa47cfdd
PM
5846 if (size == 0) {
5847 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5848 } else {
5849 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5850 }
b5ff1b31 5851 }
aa47cfdd 5852 tcg_temp_free_ptr(fpstatus);
2c0262af 5853 break;
aa47cfdd 5854 }
62698be3 5855 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5856 {
5857 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5858 if (size == 0) {
5859 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5860 } else {
5861 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5862 }
5863 tcg_temp_free_ptr(fpstatus);
2c0262af 5864 break;
aa47cfdd 5865 }
62698be3 5866 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5867 {
5868 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5869 if (size == 0) {
f71a2ae5 5870 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5871 } else {
f71a2ae5 5872 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5873 }
5874 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5875 break;
aa47cfdd 5876 }
505935fc
WN
5877 case NEON_3R_FLOAT_MISC:
5878 if (u) {
5879 /* VMAXNM/VMINNM */
5880 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5881 if (size == 0) {
f71a2ae5 5882 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5883 } else {
f71a2ae5 5884 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5885 }
5886 tcg_temp_free_ptr(fpstatus);
5887 } else {
5888 if (size == 0) {
5889 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5890 } else {
5891 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5892 }
5893 }
2c0262af 5894 break;
da97f52c
PM
5895 case NEON_3R_VFM:
5896 {
5897 /* VFMA, VFMS: fused multiply-add */
5898 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5899 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5900 if (size) {
5901 /* VFMS */
5902 gen_helper_vfp_negs(tmp, tmp);
5903 }
5904 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5905 tcg_temp_free_i32(tmp3);
5906 tcg_temp_free_ptr(fpstatus);
5907 break;
5908 }
9ee6e8bb
PB
5909 default:
5910 abort();
2c0262af 5911 }
7d1b0095 5912 tcg_temp_free_i32(tmp2);
dd8fbd78 5913
9ee6e8bb
PB
5914 /* Save the result. For elementwise operations we can put it
5915 straight into the destination register. For pairwise operations
5916 we have to be careful to avoid clobbering the source operands. */
5917 if (pairwise && rd == rm) {
dd8fbd78 5918 neon_store_scratch(pass, tmp);
9ee6e8bb 5919 } else {
dd8fbd78 5920 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5921 }
5922
5923 } /* for pass */
5924 if (pairwise && rd == rm) {
5925 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5926 tmp = neon_load_scratch(pass);
5927 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5928 }
5929 }
ad69471c 5930 /* End of 3 register same size operations. */
9ee6e8bb
PB
5931 } else if (insn & (1 << 4)) {
5932 if ((insn & 0x00380080) != 0) {
5933 /* Two registers and shift. */
5934 op = (insn >> 8) & 0xf;
5935 if (insn & (1 << 7)) {
cc13115b
PM
5936 /* 64-bit shift. */
5937 if (op > 7) {
5938 return 1;
5939 }
9ee6e8bb
PB
5940 size = 3;
5941 } else {
5942 size = 2;
5943 while ((insn & (1 << (size + 19))) == 0)
5944 size--;
5945 }
5946 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5947 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5948 by immediate using the variable shift operations. */
5949 if (op < 8) {
5950 /* Shift by immediate:
5951 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5952 if (q && ((rd | rm) & 1)) {
5953 return 1;
5954 }
5955 if (!u && (op == 4 || op == 6)) {
5956 return 1;
5957 }
9ee6e8bb
PB
5958 /* Right shifts are encoded as N - shift, where N is the
5959 element size in bits. */
5960 if (op <= 4)
5961 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5962 if (size == 3) {
5963 count = q + 1;
5964 } else {
5965 count = q ? 4: 2;
5966 }
5967 switch (size) {
5968 case 0:
5969 imm = (uint8_t) shift;
5970 imm |= imm << 8;
5971 imm |= imm << 16;
5972 break;
5973 case 1:
5974 imm = (uint16_t) shift;
5975 imm |= imm << 16;
5976 break;
5977 case 2:
5978 case 3:
5979 imm = shift;
5980 break;
5981 default:
5982 abort();
5983 }
5984
5985 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5986 if (size == 3) {
5987 neon_load_reg64(cpu_V0, rm + pass);
5988 tcg_gen_movi_i64(cpu_V1, imm);
5989 switch (op) {
5990 case 0: /* VSHR */
5991 case 1: /* VSRA */
5992 if (u)
5993 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5994 else
ad69471c 5995 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5996 break;
ad69471c
PB
5997 case 2: /* VRSHR */
5998 case 3: /* VRSRA */
5999 if (u)
6000 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6001 else
ad69471c 6002 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6003 break;
ad69471c 6004 case 4: /* VSRI */
ad69471c
PB
6005 case 5: /* VSHL, VSLI */
6006 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6007 break;
0322b26e 6008 case 6: /* VQSHLU */
02da0b2d
PM
6009 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6010 cpu_V0, cpu_V1);
ad69471c 6011 break;
0322b26e
PM
6012 case 7: /* VQSHL */
6013 if (u) {
02da0b2d 6014 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6015 cpu_V0, cpu_V1);
6016 } else {
02da0b2d 6017 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6018 cpu_V0, cpu_V1);
6019 }
9ee6e8bb 6020 break;
9ee6e8bb 6021 }
ad69471c
PB
6022 if (op == 1 || op == 3) {
6023 /* Accumulate. */
5371cb81 6024 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6025 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6026 } else if (op == 4 || (op == 5 && u)) {
6027 /* Insert */
923e6509
CL
6028 neon_load_reg64(cpu_V1, rd + pass);
6029 uint64_t mask;
6030 if (shift < -63 || shift > 63) {
6031 mask = 0;
6032 } else {
6033 if (op == 4) {
6034 mask = 0xffffffffffffffffull >> -shift;
6035 } else {
6036 mask = 0xffffffffffffffffull << shift;
6037 }
6038 }
6039 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6040 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6041 }
6042 neon_store_reg64(cpu_V0, rd + pass);
6043 } else { /* size < 3 */
6044 /* Operands in T0 and T1. */
dd8fbd78 6045 tmp = neon_load_reg(rm, pass);
7d1b0095 6046 tmp2 = tcg_temp_new_i32();
dd8fbd78 6047 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6048 switch (op) {
6049 case 0: /* VSHR */
6050 case 1: /* VSRA */
6051 GEN_NEON_INTEGER_OP(shl);
6052 break;
6053 case 2: /* VRSHR */
6054 case 3: /* VRSRA */
6055 GEN_NEON_INTEGER_OP(rshl);
6056 break;
6057 case 4: /* VSRI */
ad69471c
PB
6058 case 5: /* VSHL, VSLI */
6059 switch (size) {
dd8fbd78
FN
6060 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6061 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6062 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6063 default: abort();
ad69471c
PB
6064 }
6065 break;
0322b26e 6066 case 6: /* VQSHLU */
ad69471c 6067 switch (size) {
0322b26e 6068 case 0:
02da0b2d
PM
6069 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6070 tmp, tmp2);
0322b26e
PM
6071 break;
6072 case 1:
02da0b2d
PM
6073 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6074 tmp, tmp2);
0322b26e
PM
6075 break;
6076 case 2:
02da0b2d
PM
6077 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6078 tmp, tmp2);
0322b26e
PM
6079 break;
6080 default:
cc13115b 6081 abort();
ad69471c
PB
6082 }
6083 break;
0322b26e 6084 case 7: /* VQSHL */
02da0b2d 6085 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6086 break;
ad69471c 6087 }
7d1b0095 6088 tcg_temp_free_i32(tmp2);
ad69471c
PB
6089
6090 if (op == 1 || op == 3) {
6091 /* Accumulate. */
dd8fbd78 6092 tmp2 = neon_load_reg(rd, pass);
5371cb81 6093 gen_neon_add(size, tmp, tmp2);
7d1b0095 6094 tcg_temp_free_i32(tmp2);
ad69471c
PB
6095 } else if (op == 4 || (op == 5 && u)) {
6096 /* Insert */
6097 switch (size) {
6098 case 0:
6099 if (op == 4)
ca9a32e4 6100 mask = 0xff >> -shift;
ad69471c 6101 else
ca9a32e4
JR
6102 mask = (uint8_t)(0xff << shift);
6103 mask |= mask << 8;
6104 mask |= mask << 16;
ad69471c
PB
6105 break;
6106 case 1:
6107 if (op == 4)
ca9a32e4 6108 mask = 0xffff >> -shift;
ad69471c 6109 else
ca9a32e4
JR
6110 mask = (uint16_t)(0xffff << shift);
6111 mask |= mask << 16;
ad69471c
PB
6112 break;
6113 case 2:
ca9a32e4
JR
6114 if (shift < -31 || shift > 31) {
6115 mask = 0;
6116 } else {
6117 if (op == 4)
6118 mask = 0xffffffffu >> -shift;
6119 else
6120 mask = 0xffffffffu << shift;
6121 }
ad69471c
PB
6122 break;
6123 default:
6124 abort();
6125 }
dd8fbd78 6126 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6127 tcg_gen_andi_i32(tmp, tmp, mask);
6128 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6129 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6130 tcg_temp_free_i32(tmp2);
ad69471c 6131 }
dd8fbd78 6132 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6133 }
6134 } /* for pass */
6135 } else if (op < 10) {
ad69471c 6136 /* Shift by immediate and narrow:
9ee6e8bb 6137 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6138 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6139 if (rm & 1) {
6140 return 1;
6141 }
9ee6e8bb
PB
6142 shift = shift - (1 << (size + 3));
6143 size++;
92cdfaeb 6144 if (size == 3) {
a7812ae4 6145 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6146 neon_load_reg64(cpu_V0, rm);
6147 neon_load_reg64(cpu_V1, rm + 1);
6148 for (pass = 0; pass < 2; pass++) {
6149 TCGv_i64 in;
6150 if (pass == 0) {
6151 in = cpu_V0;
6152 } else {
6153 in = cpu_V1;
6154 }
ad69471c 6155 if (q) {
0b36f4cd 6156 if (input_unsigned) {
92cdfaeb 6157 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6158 } else {
92cdfaeb 6159 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6160 }
ad69471c 6161 } else {
0b36f4cd 6162 if (input_unsigned) {
92cdfaeb 6163 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6164 } else {
92cdfaeb 6165 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6166 }
ad69471c 6167 }
7d1b0095 6168 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6169 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6170 neon_store_reg(rd, pass, tmp);
6171 } /* for pass */
6172 tcg_temp_free_i64(tmp64);
6173 } else {
6174 if (size == 1) {
6175 imm = (uint16_t)shift;
6176 imm |= imm << 16;
2c0262af 6177 } else {
92cdfaeb
PM
6178 /* size == 2 */
6179 imm = (uint32_t)shift;
6180 }
6181 tmp2 = tcg_const_i32(imm);
6182 tmp4 = neon_load_reg(rm + 1, 0);
6183 tmp5 = neon_load_reg(rm + 1, 1);
6184 for (pass = 0; pass < 2; pass++) {
6185 if (pass == 0) {
6186 tmp = neon_load_reg(rm, 0);
6187 } else {
6188 tmp = tmp4;
6189 }
0b36f4cd
CL
6190 gen_neon_shift_narrow(size, tmp, tmp2, q,
6191 input_unsigned);
92cdfaeb
PM
6192 if (pass == 0) {
6193 tmp3 = neon_load_reg(rm, 1);
6194 } else {
6195 tmp3 = tmp5;
6196 }
0b36f4cd
CL
6197 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6198 input_unsigned);
36aa55dc 6199 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6200 tcg_temp_free_i32(tmp);
6201 tcg_temp_free_i32(tmp3);
6202 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6203 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6204 neon_store_reg(rd, pass, tmp);
6205 } /* for pass */
c6067f04 6206 tcg_temp_free_i32(tmp2);
b75263d6 6207 }
9ee6e8bb 6208 } else if (op == 10) {
cc13115b
PM
6209 /* VSHLL, VMOVL */
6210 if (q || (rd & 1)) {
9ee6e8bb 6211 return 1;
cc13115b 6212 }
ad69471c
PB
6213 tmp = neon_load_reg(rm, 0);
6214 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6215 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6216 if (pass == 1)
6217 tmp = tmp2;
6218
6219 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6220
9ee6e8bb
PB
6221 if (shift != 0) {
6222 /* The shift is less than the width of the source
ad69471c
PB
6223 type, so we can just shift the whole register. */
6224 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6225 /* Widen the result of shift: we need to clear
6226 * the potential overflow bits resulting from
6227 * left bits of the narrow input appearing as
6228 * right bits of left the neighbour narrow
6229 * input. */
ad69471c
PB
6230 if (size < 2 || !u) {
6231 uint64_t imm64;
6232 if (size == 0) {
6233 imm = (0xffu >> (8 - shift));
6234 imm |= imm << 16;
acdf01ef 6235 } else if (size == 1) {
ad69471c 6236 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6237 } else {
6238 /* size == 2 */
6239 imm = 0xffffffff >> (32 - shift);
6240 }
6241 if (size < 2) {
6242 imm64 = imm | (((uint64_t)imm) << 32);
6243 } else {
6244 imm64 = imm;
9ee6e8bb 6245 }
acdf01ef 6246 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6247 }
6248 }
ad69471c 6249 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6250 }
f73534a5 6251 } else if (op >= 14) {
9ee6e8bb 6252 /* VCVT fixed-point. */
cc13115b
PM
6253 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6254 return 1;
6255 }
f73534a5
PM
6256 /* We have already masked out the must-be-1 top bit of imm6,
6257 * hence this 32-shift where the ARM ARM has 64-imm6.
6258 */
6259 shift = 32 - shift;
9ee6e8bb 6260 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6261 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6262 if (!(op & 1)) {
9ee6e8bb 6263 if (u)
5500b06c 6264 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6265 else
5500b06c 6266 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6267 } else {
6268 if (u)
5500b06c 6269 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6270 else
5500b06c 6271 gen_vfp_tosl(0, shift, 1);
2c0262af 6272 }
4373f3ce 6273 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6274 }
6275 } else {
9ee6e8bb
PB
6276 return 1;
6277 }
6278 } else { /* (insn & 0x00380080) == 0 */
6279 int invert;
7d80fee5
PM
6280 if (q && (rd & 1)) {
6281 return 1;
6282 }
9ee6e8bb
PB
6283
6284 op = (insn >> 8) & 0xf;
6285 /* One register and immediate. */
6286 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6287 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6288 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6289 * We choose to not special-case this and will behave as if a
6290 * valid constant encoding of 0 had been given.
6291 */
9ee6e8bb
PB
6292 switch (op) {
6293 case 0: case 1:
6294 /* no-op */
6295 break;
6296 case 2: case 3:
6297 imm <<= 8;
6298 break;
6299 case 4: case 5:
6300 imm <<= 16;
6301 break;
6302 case 6: case 7:
6303 imm <<= 24;
6304 break;
6305 case 8: case 9:
6306 imm |= imm << 16;
6307 break;
6308 case 10: case 11:
6309 imm = (imm << 8) | (imm << 24);
6310 break;
6311 case 12:
8e31209e 6312 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6313 break;
6314 case 13:
6315 imm = (imm << 16) | 0xffff;
6316 break;
6317 case 14:
6318 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6319 if (invert)
6320 imm = ~imm;
6321 break;
6322 case 15:
7d80fee5
PM
6323 if (invert) {
6324 return 1;
6325 }
9ee6e8bb
PB
6326 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6327 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6328 break;
6329 }
6330 if (invert)
6331 imm = ~imm;
6332
9ee6e8bb
PB
6333 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6334 if (op & 1 && op < 12) {
ad69471c 6335 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6336 if (invert) {
6337 /* The immediate value has already been inverted, so
6338 BIC becomes AND. */
ad69471c 6339 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6340 } else {
ad69471c 6341 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6342 }
9ee6e8bb 6343 } else {
ad69471c 6344 /* VMOV, VMVN. */
7d1b0095 6345 tmp = tcg_temp_new_i32();
9ee6e8bb 6346 if (op == 14 && invert) {
a5a14945 6347 int n;
ad69471c
PB
6348 uint32_t val;
6349 val = 0;
9ee6e8bb
PB
6350 for (n = 0; n < 4; n++) {
6351 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6352 val |= 0xff << (n * 8);
9ee6e8bb 6353 }
ad69471c
PB
6354 tcg_gen_movi_i32(tmp, val);
6355 } else {
6356 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6357 }
9ee6e8bb 6358 }
ad69471c 6359 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6360 }
6361 }
e4b3861d 6362 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6363 if (size != 3) {
6364 op = (insn >> 8) & 0xf;
6365 if ((insn & (1 << 6)) == 0) {
6366 /* Three registers of different lengths. */
6367 int src1_wide;
6368 int src2_wide;
6369 int prewiden;
526d0096
PM
6370 /* undefreq: bit 0 : UNDEF if size == 0
6371 * bit 1 : UNDEF if size == 1
6372 * bit 2 : UNDEF if size == 2
6373 * bit 3 : UNDEF if U == 1
6374 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6375 */
6376 int undefreq;
6377 /* prewiden, src1_wide, src2_wide, undefreq */
6378 static const int neon_3reg_wide[16][4] = {
6379 {1, 0, 0, 0}, /* VADDL */
6380 {1, 1, 0, 0}, /* VADDW */
6381 {1, 0, 0, 0}, /* VSUBL */
6382 {1, 1, 0, 0}, /* VSUBW */
6383 {0, 1, 1, 0}, /* VADDHN */
6384 {0, 0, 0, 0}, /* VABAL */
6385 {0, 1, 1, 0}, /* VSUBHN */
6386 {0, 0, 0, 0}, /* VABDL */
6387 {0, 0, 0, 0}, /* VMLAL */
526d0096 6388 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6389 {0, 0, 0, 0}, /* VMLSL */
526d0096 6390 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6391 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6392 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6393 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6394 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6395 };
6396
6397 prewiden = neon_3reg_wide[op][0];
6398 src1_wide = neon_3reg_wide[op][1];
6399 src2_wide = neon_3reg_wide[op][2];
695272dc 6400 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6401
526d0096
PM
6402 if ((undefreq & (1 << size)) ||
6403 ((undefreq & 8) && u)) {
695272dc
PM
6404 return 1;
6405 }
6406 if ((src1_wide && (rn & 1)) ||
6407 (src2_wide && (rm & 1)) ||
6408 (!src2_wide && (rd & 1))) {
ad69471c 6409 return 1;
695272dc 6410 }
ad69471c 6411
4e624eda
PM
6412 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6413 * outside the loop below as it only performs a single pass.
6414 */
6415 if (op == 14 && size == 2) {
6416 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6417
d614a513 6418 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6419 return 1;
6420 }
6421 tcg_rn = tcg_temp_new_i64();
6422 tcg_rm = tcg_temp_new_i64();
6423 tcg_rd = tcg_temp_new_i64();
6424 neon_load_reg64(tcg_rn, rn);
6425 neon_load_reg64(tcg_rm, rm);
6426 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6427 neon_store_reg64(tcg_rd, rd);
6428 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6429 neon_store_reg64(tcg_rd, rd + 1);
6430 tcg_temp_free_i64(tcg_rn);
6431 tcg_temp_free_i64(tcg_rm);
6432 tcg_temp_free_i64(tcg_rd);
6433 return 0;
6434 }
6435
9ee6e8bb
PB
6436 /* Avoid overlapping operands. Wide source operands are
6437 always aligned so will never overlap with wide
6438 destinations in problematic ways. */
8f8e3aa4 6439 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6440 tmp = neon_load_reg(rm, 1);
6441 neon_store_scratch(2, tmp);
8f8e3aa4 6442 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6443 tmp = neon_load_reg(rn, 1);
6444 neon_store_scratch(2, tmp);
9ee6e8bb 6445 }
39d5492a 6446 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6447 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6448 if (src1_wide) {
6449 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6450 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6451 } else {
ad69471c 6452 if (pass == 1 && rd == rn) {
dd8fbd78 6453 tmp = neon_load_scratch(2);
9ee6e8bb 6454 } else {
ad69471c
PB
6455 tmp = neon_load_reg(rn, pass);
6456 }
6457 if (prewiden) {
6458 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6459 }
6460 }
ad69471c
PB
6461 if (src2_wide) {
6462 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6463 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6464 } else {
ad69471c 6465 if (pass == 1 && rd == rm) {
dd8fbd78 6466 tmp2 = neon_load_scratch(2);
9ee6e8bb 6467 } else {
ad69471c
PB
6468 tmp2 = neon_load_reg(rm, pass);
6469 }
6470 if (prewiden) {
6471 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6472 }
9ee6e8bb
PB
6473 }
6474 switch (op) {
6475 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6476 gen_neon_addl(size);
9ee6e8bb 6477 break;
79b0e534 6478 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6479 gen_neon_subl(size);
9ee6e8bb
PB
6480 break;
6481 case 5: case 7: /* VABAL, VABDL */
6482 switch ((size << 1) | u) {
ad69471c
PB
6483 case 0:
6484 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6485 break;
6486 case 1:
6487 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6488 break;
6489 case 2:
6490 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6491 break;
6492 case 3:
6493 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6494 break;
6495 case 4:
6496 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6497 break;
6498 case 5:
6499 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6500 break;
9ee6e8bb
PB
6501 default: abort();
6502 }
7d1b0095
PM
6503 tcg_temp_free_i32(tmp2);
6504 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6505 break;
6506 case 8: case 9: case 10: case 11: case 12: case 13:
6507 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6508 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6509 break;
6510 case 14: /* Polynomial VMULL */
e5ca24cb 6511 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6512 tcg_temp_free_i32(tmp2);
6513 tcg_temp_free_i32(tmp);
e5ca24cb 6514 break;
695272dc
PM
6515 default: /* 15 is RESERVED: caught earlier */
6516 abort();
9ee6e8bb 6517 }
ebcd88ce
PM
6518 if (op == 13) {
6519 /* VQDMULL */
6520 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6521 neon_store_reg64(cpu_V0, rd + pass);
6522 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6523 /* Accumulate. */
ebcd88ce 6524 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6525 switch (op) {
4dc064e6
PM
6526 case 10: /* VMLSL */
6527 gen_neon_negl(cpu_V0, size);
6528 /* Fall through */
6529 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6530 gen_neon_addl(size);
9ee6e8bb
PB
6531 break;
6532 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6533 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6534 if (op == 11) {
6535 gen_neon_negl(cpu_V0, size);
6536 }
ad69471c
PB
6537 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6538 break;
9ee6e8bb
PB
6539 default:
6540 abort();
6541 }
ad69471c 6542 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6543 } else if (op == 4 || op == 6) {
6544 /* Narrowing operation. */
7d1b0095 6545 tmp = tcg_temp_new_i32();
79b0e534 6546 if (!u) {
9ee6e8bb 6547 switch (size) {
ad69471c
PB
6548 case 0:
6549 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6550 break;
6551 case 1:
6552 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6553 break;
6554 case 2:
6555 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6556 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6557 break;
9ee6e8bb
PB
6558 default: abort();
6559 }
6560 } else {
6561 switch (size) {
ad69471c
PB
6562 case 0:
6563 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6564 break;
6565 case 1:
6566 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6567 break;
6568 case 2:
6569 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6570 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6571 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6572 break;
9ee6e8bb
PB
6573 default: abort();
6574 }
6575 }
ad69471c
PB
6576 if (pass == 0) {
6577 tmp3 = tmp;
6578 } else {
6579 neon_store_reg(rd, 0, tmp3);
6580 neon_store_reg(rd, 1, tmp);
6581 }
9ee6e8bb
PB
6582 } else {
6583 /* Write back the result. */
ad69471c 6584 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6585 }
6586 }
6587 } else {
3e3326df
PM
6588 /* Two registers and a scalar. NB that for ops of this form
6589 * the ARM ARM labels bit 24 as Q, but it is in our variable
6590 * 'u', not 'q'.
6591 */
6592 if (size == 0) {
6593 return 1;
6594 }
9ee6e8bb 6595 switch (op) {
9ee6e8bb 6596 case 1: /* Float VMLA scalar */
9ee6e8bb 6597 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6598 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6599 if (size == 1) {
6600 return 1;
6601 }
6602 /* fall through */
6603 case 0: /* Integer VMLA scalar */
6604 case 4: /* Integer VMLS scalar */
6605 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6606 case 12: /* VQDMULH scalar */
6607 case 13: /* VQRDMULH scalar */
3e3326df
PM
6608 if (u && ((rd | rn) & 1)) {
6609 return 1;
6610 }
dd8fbd78
FN
6611 tmp = neon_get_scalar(size, rm);
6612 neon_store_scratch(0, tmp);
9ee6e8bb 6613 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6614 tmp = neon_load_scratch(0);
6615 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6616 if (op == 12) {
6617 if (size == 1) {
02da0b2d 6618 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6619 } else {
02da0b2d 6620 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6621 }
6622 } else if (op == 13) {
6623 if (size == 1) {
02da0b2d 6624 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6625 } else {
02da0b2d 6626 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6627 }
6628 } else if (op & 1) {
aa47cfdd
PM
6629 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6630 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6631 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6632 } else {
6633 switch (size) {
dd8fbd78
FN
6634 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6635 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6636 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6637 default: abort();
9ee6e8bb
PB
6638 }
6639 }
7d1b0095 6640 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6641 if (op < 8) {
6642 /* Accumulate. */
dd8fbd78 6643 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6644 switch (op) {
6645 case 0:
dd8fbd78 6646 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6647 break;
6648 case 1:
aa47cfdd
PM
6649 {
6650 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6651 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6652 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6653 break;
aa47cfdd 6654 }
9ee6e8bb 6655 case 4:
dd8fbd78 6656 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6657 break;
6658 case 5:
aa47cfdd
PM
6659 {
6660 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6661 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6662 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6663 break;
aa47cfdd 6664 }
9ee6e8bb
PB
6665 default:
6666 abort();
6667 }
7d1b0095 6668 tcg_temp_free_i32(tmp2);
9ee6e8bb 6669 }
dd8fbd78 6670 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6671 }
6672 break;
9ee6e8bb 6673 case 3: /* VQDMLAL scalar */
9ee6e8bb 6674 case 7: /* VQDMLSL scalar */
9ee6e8bb 6675 case 11: /* VQDMULL scalar */
3e3326df 6676 if (u == 1) {
ad69471c 6677 return 1;
3e3326df
PM
6678 }
6679 /* fall through */
6680 case 2: /* VMLAL sclar */
6681 case 6: /* VMLSL scalar */
6682 case 10: /* VMULL scalar */
6683 if (rd & 1) {
6684 return 1;
6685 }
dd8fbd78 6686 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6687 /* We need a copy of tmp2 because gen_neon_mull
6688 * deletes it during pass 0. */
7d1b0095 6689 tmp4 = tcg_temp_new_i32();
c6067f04 6690 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6691 tmp3 = neon_load_reg(rn, 1);
ad69471c 6692
9ee6e8bb 6693 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6694 if (pass == 0) {
6695 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6696 } else {
dd8fbd78 6697 tmp = tmp3;
c6067f04 6698 tmp2 = tmp4;
9ee6e8bb 6699 }
ad69471c 6700 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6701 if (op != 11) {
6702 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6703 }
9ee6e8bb 6704 switch (op) {
4dc064e6
PM
6705 case 6:
6706 gen_neon_negl(cpu_V0, size);
6707 /* Fall through */
6708 case 2:
ad69471c 6709 gen_neon_addl(size);
9ee6e8bb
PB
6710 break;
6711 case 3: case 7:
ad69471c 6712 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6713 if (op == 7) {
6714 gen_neon_negl(cpu_V0, size);
6715 }
ad69471c 6716 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6717 break;
6718 case 10:
6719 /* no-op */
6720 break;
6721 case 11:
ad69471c 6722 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6723 break;
6724 default:
6725 abort();
6726 }
ad69471c 6727 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6728 }
dd8fbd78 6729
dd8fbd78 6730
9ee6e8bb
PB
6731 break;
6732 default: /* 14 and 15 are RESERVED */
6733 return 1;
6734 }
6735 }
6736 } else { /* size == 3 */
6737 if (!u) {
6738 /* Extract. */
9ee6e8bb 6739 imm = (insn >> 8) & 0xf;
ad69471c
PB
6740
6741 if (imm > 7 && !q)
6742 return 1;
6743
52579ea1
PM
6744 if (q && ((rd | rn | rm) & 1)) {
6745 return 1;
6746 }
6747
ad69471c
PB
6748 if (imm == 0) {
6749 neon_load_reg64(cpu_V0, rn);
6750 if (q) {
6751 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6752 }
ad69471c
PB
6753 } else if (imm == 8) {
6754 neon_load_reg64(cpu_V0, rn + 1);
6755 if (q) {
6756 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6757 }
ad69471c 6758 } else if (q) {
a7812ae4 6759 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6760 if (imm < 8) {
6761 neon_load_reg64(cpu_V0, rn);
a7812ae4 6762 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6763 } else {
6764 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6765 neon_load_reg64(tmp64, rm);
ad69471c
PB
6766 }
6767 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6768 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6769 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6770 if (imm < 8) {
6771 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6772 } else {
ad69471c
PB
6773 neon_load_reg64(cpu_V1, rm + 1);
6774 imm -= 8;
9ee6e8bb 6775 }
ad69471c 6776 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6777 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6778 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6779 tcg_temp_free_i64(tmp64);
ad69471c 6780 } else {
a7812ae4 6781 /* BUGFIX */
ad69471c 6782 neon_load_reg64(cpu_V0, rn);
a7812ae4 6783 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6784 neon_load_reg64(cpu_V1, rm);
a7812ae4 6785 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6786 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6787 }
6788 neon_store_reg64(cpu_V0, rd);
6789 if (q) {
6790 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6791 }
6792 } else if ((insn & (1 << 11)) == 0) {
6793 /* Two register misc. */
6794 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6795 size = (insn >> 18) & 3;
600b828c
PM
6796 /* UNDEF for unknown op values and bad op-size combinations */
6797 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6798 return 1;
6799 }
fc2a9b37
PM
6800 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6801 q && ((rm | rd) & 1)) {
6802 return 1;
6803 }
9ee6e8bb 6804 switch (op) {
600b828c 6805 case NEON_2RM_VREV64:
9ee6e8bb 6806 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6807 tmp = neon_load_reg(rm, pass * 2);
6808 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6809 switch (size) {
dd8fbd78
FN
6810 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6811 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6812 case 2: /* no-op */ break;
6813 default: abort();
6814 }
dd8fbd78 6815 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6816 if (size == 2) {
dd8fbd78 6817 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6818 } else {
9ee6e8bb 6819 switch (size) {
dd8fbd78
FN
6820 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6821 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6822 default: abort();
6823 }
dd8fbd78 6824 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6825 }
6826 }
6827 break;
600b828c
PM
6828 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6829 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6830 for (pass = 0; pass < q + 1; pass++) {
6831 tmp = neon_load_reg(rm, pass * 2);
6832 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6833 tmp = neon_load_reg(rm, pass * 2 + 1);
6834 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6835 switch (size) {
6836 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6837 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6838 case 2: tcg_gen_add_i64(CPU_V001); break;
6839 default: abort();
6840 }
600b828c 6841 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6842 /* Accumulate. */
ad69471c
PB
6843 neon_load_reg64(cpu_V1, rd + pass);
6844 gen_neon_addl(size);
9ee6e8bb 6845 }
ad69471c 6846 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6847 }
6848 break;
600b828c 6849 case NEON_2RM_VTRN:
9ee6e8bb 6850 if (size == 2) {
a5a14945 6851 int n;
9ee6e8bb 6852 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6853 tmp = neon_load_reg(rm, n);
6854 tmp2 = neon_load_reg(rd, n + 1);
6855 neon_store_reg(rm, n, tmp2);
6856 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6857 }
6858 } else {
6859 goto elementwise;
6860 }
6861 break;
600b828c 6862 case NEON_2RM_VUZP:
02acedf9 6863 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6864 return 1;
9ee6e8bb
PB
6865 }
6866 break;
600b828c 6867 case NEON_2RM_VZIP:
d68a6f3a 6868 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6869 return 1;
9ee6e8bb
PB
6870 }
6871 break;
600b828c
PM
6872 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6873 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6874 if (rm & 1) {
6875 return 1;
6876 }
39d5492a 6877 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6878 for (pass = 0; pass < 2; pass++) {
ad69471c 6879 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6880 tmp = tcg_temp_new_i32();
600b828c
PM
6881 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6882 tmp, cpu_V0);
ad69471c
PB
6883 if (pass == 0) {
6884 tmp2 = tmp;
6885 } else {
6886 neon_store_reg(rd, 0, tmp2);
6887 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6888 }
9ee6e8bb
PB
6889 }
6890 break;
600b828c 6891 case NEON_2RM_VSHLL:
fc2a9b37 6892 if (q || (rd & 1)) {
9ee6e8bb 6893 return 1;
600b828c 6894 }
ad69471c
PB
6895 tmp = neon_load_reg(rm, 0);
6896 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6897 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6898 if (pass == 1)
6899 tmp = tmp2;
6900 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6901 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6902 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6903 }
6904 break;
600b828c 6905 case NEON_2RM_VCVT_F16_F32:
d614a513 6906 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6907 q || (rm & 1)) {
6908 return 1;
6909 }
7d1b0095
PM
6910 tmp = tcg_temp_new_i32();
6911 tmp2 = tcg_temp_new_i32();
60011498 6912 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6913 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6914 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6915 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6916 tcg_gen_shli_i32(tmp2, tmp2, 16);
6917 tcg_gen_or_i32(tmp2, tmp2, tmp);
6918 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6919 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6920 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6921 neon_store_reg(rd, 0, tmp2);
7d1b0095 6922 tmp2 = tcg_temp_new_i32();
2d981da7 6923 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6924 tcg_gen_shli_i32(tmp2, tmp2, 16);
6925 tcg_gen_or_i32(tmp2, tmp2, tmp);
6926 neon_store_reg(rd, 1, tmp2);
7d1b0095 6927 tcg_temp_free_i32(tmp);
60011498 6928 break;
600b828c 6929 case NEON_2RM_VCVT_F32_F16:
d614a513 6930 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6931 q || (rd & 1)) {
6932 return 1;
6933 }
7d1b0095 6934 tmp3 = tcg_temp_new_i32();
60011498
PB
6935 tmp = neon_load_reg(rm, 0);
6936 tmp2 = neon_load_reg(rm, 1);
6937 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6938 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6939 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6940 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6941 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6942 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6943 tcg_temp_free_i32(tmp);
60011498 6944 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6945 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6946 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6947 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6948 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6949 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6950 tcg_temp_free_i32(tmp2);
6951 tcg_temp_free_i32(tmp3);
60011498 6952 break;
9d935509 6953 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6954 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6955 || ((rm | rd) & 1)) {
6956 return 1;
6957 }
6958 tmp = tcg_const_i32(rd);
6959 tmp2 = tcg_const_i32(rm);
6960
6961 /* Bit 6 is the lowest opcode bit; it distinguishes between
6962 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6963 */
6964 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6965
6966 if (op == NEON_2RM_AESE) {
6967 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6968 } else {
6969 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6970 }
6971 tcg_temp_free_i32(tmp);
6972 tcg_temp_free_i32(tmp2);
6973 tcg_temp_free_i32(tmp3);
6974 break;
f1ecb913 6975 case NEON_2RM_SHA1H:
d614a513 6976 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
6977 || ((rm | rd) & 1)) {
6978 return 1;
6979 }
6980 tmp = tcg_const_i32(rd);
6981 tmp2 = tcg_const_i32(rm);
6982
6983 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6984
6985 tcg_temp_free_i32(tmp);
6986 tcg_temp_free_i32(tmp2);
6987 break;
6988 case NEON_2RM_SHA1SU1:
6989 if ((rm | rd) & 1) {
6990 return 1;
6991 }
6992 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6993 if (q) {
d614a513 6994 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
6995 return 1;
6996 }
d614a513 6997 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
6998 return 1;
6999 }
7000 tmp = tcg_const_i32(rd);
7001 tmp2 = tcg_const_i32(rm);
7002 if (q) {
7003 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7004 } else {
7005 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7006 }
7007 tcg_temp_free_i32(tmp);
7008 tcg_temp_free_i32(tmp2);
7009 break;
9ee6e8bb
PB
7010 default:
7011 elementwise:
7012 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7013 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7014 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7015 neon_reg_offset(rm, pass));
39d5492a 7016 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7017 } else {
dd8fbd78 7018 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7019 }
7020 switch (op) {
600b828c 7021 case NEON_2RM_VREV32:
9ee6e8bb 7022 switch (size) {
dd8fbd78
FN
7023 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7024 case 1: gen_swap_half(tmp); break;
600b828c 7025 default: abort();
9ee6e8bb
PB
7026 }
7027 break;
600b828c 7028 case NEON_2RM_VREV16:
dd8fbd78 7029 gen_rev16(tmp);
9ee6e8bb 7030 break;
600b828c 7031 case NEON_2RM_VCLS:
9ee6e8bb 7032 switch (size) {
dd8fbd78
FN
7033 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7034 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7035 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7036 default: abort();
9ee6e8bb
PB
7037 }
7038 break;
600b828c 7039 case NEON_2RM_VCLZ:
9ee6e8bb 7040 switch (size) {
dd8fbd78
FN
7041 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7042 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7043 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 7044 default: abort();
9ee6e8bb
PB
7045 }
7046 break;
600b828c 7047 case NEON_2RM_VCNT:
dd8fbd78 7048 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7049 break;
600b828c 7050 case NEON_2RM_VMVN:
dd8fbd78 7051 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7052 break;
600b828c 7053 case NEON_2RM_VQABS:
9ee6e8bb 7054 switch (size) {
02da0b2d
PM
7055 case 0:
7056 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7057 break;
7058 case 1:
7059 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7060 break;
7061 case 2:
7062 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7063 break;
600b828c 7064 default: abort();
9ee6e8bb
PB
7065 }
7066 break;
600b828c 7067 case NEON_2RM_VQNEG:
9ee6e8bb 7068 switch (size) {
02da0b2d
PM
7069 case 0:
7070 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7071 break;
7072 case 1:
7073 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7074 break;
7075 case 2:
7076 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7077 break;
600b828c 7078 default: abort();
9ee6e8bb
PB
7079 }
7080 break;
600b828c 7081 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7082 tmp2 = tcg_const_i32(0);
9ee6e8bb 7083 switch(size) {
dd8fbd78
FN
7084 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7085 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7086 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7087 default: abort();
9ee6e8bb 7088 }
39d5492a 7089 tcg_temp_free_i32(tmp2);
600b828c 7090 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7091 tcg_gen_not_i32(tmp, tmp);
600b828c 7092 }
9ee6e8bb 7093 break;
600b828c 7094 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7095 tmp2 = tcg_const_i32(0);
9ee6e8bb 7096 switch(size) {
dd8fbd78
FN
7097 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7098 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7099 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7100 default: abort();
9ee6e8bb 7101 }
39d5492a 7102 tcg_temp_free_i32(tmp2);
600b828c 7103 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7104 tcg_gen_not_i32(tmp, tmp);
600b828c 7105 }
9ee6e8bb 7106 break;
600b828c 7107 case NEON_2RM_VCEQ0:
dd8fbd78 7108 tmp2 = tcg_const_i32(0);
9ee6e8bb 7109 switch(size) {
dd8fbd78
FN
7110 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7111 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7112 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7113 default: abort();
9ee6e8bb 7114 }
39d5492a 7115 tcg_temp_free_i32(tmp2);
9ee6e8bb 7116 break;
600b828c 7117 case NEON_2RM_VABS:
9ee6e8bb 7118 switch(size) {
dd8fbd78
FN
7119 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7120 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7121 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7122 default: abort();
9ee6e8bb
PB
7123 }
7124 break;
600b828c 7125 case NEON_2RM_VNEG:
dd8fbd78
FN
7126 tmp2 = tcg_const_i32(0);
7127 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7128 tcg_temp_free_i32(tmp2);
9ee6e8bb 7129 break;
600b828c 7130 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7131 {
7132 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7133 tmp2 = tcg_const_i32(0);
aa47cfdd 7134 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7135 tcg_temp_free_i32(tmp2);
aa47cfdd 7136 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7137 break;
aa47cfdd 7138 }
600b828c 7139 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7140 {
7141 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7142 tmp2 = tcg_const_i32(0);
aa47cfdd 7143 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7144 tcg_temp_free_i32(tmp2);
aa47cfdd 7145 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7146 break;
aa47cfdd 7147 }
600b828c 7148 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7149 {
7150 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7151 tmp2 = tcg_const_i32(0);
aa47cfdd 7152 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7153 tcg_temp_free_i32(tmp2);
aa47cfdd 7154 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7155 break;
aa47cfdd 7156 }
600b828c 7157 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7158 {
7159 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7160 tmp2 = tcg_const_i32(0);
aa47cfdd 7161 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7162 tcg_temp_free_i32(tmp2);
aa47cfdd 7163 tcg_temp_free_ptr(fpstatus);
0e326109 7164 break;
aa47cfdd 7165 }
600b828c 7166 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7167 {
7168 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7169 tmp2 = tcg_const_i32(0);
aa47cfdd 7170 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7171 tcg_temp_free_i32(tmp2);
aa47cfdd 7172 tcg_temp_free_ptr(fpstatus);
0e326109 7173 break;
aa47cfdd 7174 }
600b828c 7175 case NEON_2RM_VABS_F:
4373f3ce 7176 gen_vfp_abs(0);
9ee6e8bb 7177 break;
600b828c 7178 case NEON_2RM_VNEG_F:
4373f3ce 7179 gen_vfp_neg(0);
9ee6e8bb 7180 break;
600b828c 7181 case NEON_2RM_VSWP:
dd8fbd78
FN
7182 tmp2 = neon_load_reg(rd, pass);
7183 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7184 break;
600b828c 7185 case NEON_2RM_VTRN:
dd8fbd78 7186 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7187 switch (size) {
dd8fbd78
FN
7188 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7189 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7190 default: abort();
9ee6e8bb 7191 }
dd8fbd78 7192 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7193 break;
34f7b0a2
WN
7194 case NEON_2RM_VRINTN:
7195 case NEON_2RM_VRINTA:
7196 case NEON_2RM_VRINTM:
7197 case NEON_2RM_VRINTP:
7198 case NEON_2RM_VRINTZ:
7199 {
7200 TCGv_i32 tcg_rmode;
7201 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7202 int rmode;
7203
7204 if (op == NEON_2RM_VRINTZ) {
7205 rmode = FPROUNDING_ZERO;
7206 } else {
7207 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7208 }
7209
7210 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7211 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7212 cpu_env);
7213 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7214 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7215 cpu_env);
7216 tcg_temp_free_ptr(fpstatus);
7217 tcg_temp_free_i32(tcg_rmode);
7218 break;
7219 }
2ce70625
WN
7220 case NEON_2RM_VRINTX:
7221 {
7222 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7223 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7224 tcg_temp_free_ptr(fpstatus);
7225 break;
7226 }
901ad525
WN
7227 case NEON_2RM_VCVTAU:
7228 case NEON_2RM_VCVTAS:
7229 case NEON_2RM_VCVTNU:
7230 case NEON_2RM_VCVTNS:
7231 case NEON_2RM_VCVTPU:
7232 case NEON_2RM_VCVTPS:
7233 case NEON_2RM_VCVTMU:
7234 case NEON_2RM_VCVTMS:
7235 {
7236 bool is_signed = !extract32(insn, 7, 1);
7237 TCGv_ptr fpst = get_fpstatus_ptr(1);
7238 TCGv_i32 tcg_rmode, tcg_shift;
7239 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7240
7241 tcg_shift = tcg_const_i32(0);
7242 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7243 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7244 cpu_env);
7245
7246 if (is_signed) {
7247 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7248 tcg_shift, fpst);
7249 } else {
7250 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7251 tcg_shift, fpst);
7252 }
7253
7254 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7255 cpu_env);
7256 tcg_temp_free_i32(tcg_rmode);
7257 tcg_temp_free_i32(tcg_shift);
7258 tcg_temp_free_ptr(fpst);
7259 break;
7260 }
600b828c 7261 case NEON_2RM_VRECPE:
b6d4443a
AB
7262 {
7263 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7264 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7265 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7266 break;
b6d4443a 7267 }
600b828c 7268 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7269 {
7270 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7271 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7272 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7273 break;
c2fb418e 7274 }
600b828c 7275 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7276 {
7277 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7278 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7279 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7280 break;
b6d4443a 7281 }
600b828c 7282 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7283 {
7284 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7285 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7286 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7287 break;
c2fb418e 7288 }
600b828c 7289 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7290 gen_vfp_sito(0, 1);
9ee6e8bb 7291 break;
600b828c 7292 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7293 gen_vfp_uito(0, 1);
9ee6e8bb 7294 break;
600b828c 7295 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7296 gen_vfp_tosiz(0, 1);
9ee6e8bb 7297 break;
600b828c 7298 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7299 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7300 break;
7301 default:
600b828c
PM
7302 /* Reserved op values were caught by the
7303 * neon_2rm_sizes[] check earlier.
7304 */
7305 abort();
9ee6e8bb 7306 }
600b828c 7307 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7308 tcg_gen_st_f32(cpu_F0s, cpu_env,
7309 neon_reg_offset(rd, pass));
9ee6e8bb 7310 } else {
dd8fbd78 7311 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7312 }
7313 }
7314 break;
7315 }
7316 } else if ((insn & (1 << 10)) == 0) {
7317 /* VTBL, VTBX. */
56907d77
PM
7318 int n = ((insn >> 8) & 3) + 1;
7319 if ((rn + n) > 32) {
7320 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7321 * helper function running off the end of the register file.
7322 */
7323 return 1;
7324 }
7325 n <<= 3;
9ee6e8bb 7326 if (insn & (1 << 6)) {
8f8e3aa4 7327 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7328 } else {
7d1b0095 7329 tmp = tcg_temp_new_i32();
8f8e3aa4 7330 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7331 }
8f8e3aa4 7332 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7333 tmp4 = tcg_const_i32(rn);
7334 tmp5 = tcg_const_i32(n);
9ef39277 7335 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7336 tcg_temp_free_i32(tmp);
9ee6e8bb 7337 if (insn & (1 << 6)) {
8f8e3aa4 7338 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7339 } else {
7d1b0095 7340 tmp = tcg_temp_new_i32();
8f8e3aa4 7341 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7342 }
8f8e3aa4 7343 tmp3 = neon_load_reg(rm, 1);
9ef39277 7344 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7345 tcg_temp_free_i32(tmp5);
7346 tcg_temp_free_i32(tmp4);
8f8e3aa4 7347 neon_store_reg(rd, 0, tmp2);
3018f259 7348 neon_store_reg(rd, 1, tmp3);
7d1b0095 7349 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7350 } else if ((insn & 0x380) == 0) {
7351 /* VDUP */
133da6aa
JR
7352 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7353 return 1;
7354 }
9ee6e8bb 7355 if (insn & (1 << 19)) {
dd8fbd78 7356 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7357 } else {
dd8fbd78 7358 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7359 }
7360 if (insn & (1 << 16)) {
dd8fbd78 7361 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7362 } else if (insn & (1 << 17)) {
7363 if ((insn >> 18) & 1)
dd8fbd78 7364 gen_neon_dup_high16(tmp);
9ee6e8bb 7365 else
dd8fbd78 7366 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7367 }
7368 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7369 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7370 tcg_gen_mov_i32(tmp2, tmp);
7371 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7372 }
7d1b0095 7373 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7374 } else {
7375 return 1;
7376 }
7377 }
7378 }
7379 return 0;
7380}
7381
/* Decode and translate a coprocessor instruction (MRC/MCR/MRRC/MCRR and
 * friends).  Returns 0 if the instruction was handled, nonzero to UNDEF.
 *
 * XScale/iwMMXt coprocessor spaces are dispatched to their own decoders;
 * everything else is treated as a generic system register access looked up
 * via the cp_regs hashtable on the DisasContext.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates access to cp0/cp1 on XScale; bit clear == UNDEF */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-identifying fields; the 64-bit (MRRC/MCRR)
     * encoding has no crn/opc2 and carries the second transfer register
     * in bits [19:16].
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions (static, translate-time check) */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec/PC state so a helper-raised exception reports
             * the correct restart point.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O-side-effecting registers must be bracketed by gen_io_start/
         * gen_io_end when icount is in use.
         */
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into rt (low) and rt2 (high) */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7633
5e3f878a
PB
7634
7635/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7636static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7637{
39d5492a 7638 TCGv_i32 tmp;
7d1b0095 7639 tmp = tcg_temp_new_i32();
ecc7b3aa 7640 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7641 store_reg(s, rlow, tmp);
7d1b0095 7642 tmp = tcg_temp_new_i32();
5e3f878a 7643 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7644 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7645 store_reg(s, rhigh, tmp);
7646}
7647
7648/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7649static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7650{
a7812ae4 7651 TCGv_i64 tmp;
39d5492a 7652 TCGv_i32 tmp2;
5e3f878a 7653
36aa55dc 7654 /* Load value and extend to 64 bits. */
a7812ae4 7655 tmp = tcg_temp_new_i64();
5e3f878a
PB
7656 tmp2 = load_reg(s, rlow);
7657 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7658 tcg_temp_free_i32(tmp2);
5e3f878a 7659 tcg_gen_add_i64(val, val, tmp);
b75263d6 7660 tcg_temp_free_i64(tmp);
5e3f878a
PB
7661}
7662
7663/* load and add a 64-bit value from a register pair. */
a7812ae4 7664static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7665{
a7812ae4 7666 TCGv_i64 tmp;
39d5492a
PM
7667 TCGv_i32 tmpl;
7668 TCGv_i32 tmph;
5e3f878a
PB
7669
7670 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7671 tmpl = load_reg(s, rlow);
7672 tmph = load_reg(s, rhigh);
a7812ae4 7673 tmp = tcg_temp_new_i64();
36aa55dc 7674 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7675 tcg_temp_free_i32(tmpl);
7676 tcg_temp_free_i32(tmph);
5e3f878a 7677 tcg_gen_add_i64(val, val, tmp);
b75263d6 7678 tcg_temp_free_i64(tmp);
5e3f878a
PB
7679}
7680
c9f10124 7681/* Set N and Z flags from hi|lo. */
39d5492a 7682static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7683{
c9f10124
RH
7684 tcg_gen_mov_i32(cpu_NF, hi);
7685 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7686}
7687
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Record that this TB contains a load-exclusive. */
    s->is_ldex = true;

    /* size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword (two words) */
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        /* Doubleword: load the second word and record the combined
         * 64-bit value for the later store-exclusive comparison.
         */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Remember the address so gen_store_exclusive() can match against it. */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7735
/* CLREX: clear the local exclusive monitor by invalidating the recorded
 * exclusive address (-1 never matches a real address, so any subsequent
 * store-exclusive will fail).
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7740
7741#ifdef CONFIG_USER_ONLY
/* User-mode store-exclusive: record the address and the operand registers
 * (packed into cpu_exclusive_info) and raise EXCP_STREX so the whole
 * compare-and-store can be performed atomically outside generated code.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the rd/rt/rt2 register numbers for the exception
     * handler to decode.
     */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
7750#else
/* System-mode store-exclusive: re-load the memory location and compare
 * both the address and the value against those recorded by the matching
 * load-exclusive; only store (and return 0 in Rd) if both match.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* First check: does the address match the recorded exclusive address? */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load the location and compare against the value
     * seen by the load-exclusive.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: second word goes to addr + 4 */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is now clear. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7834#endif
7835
81465888
PM
/* gen_srs:
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: store LR and SPSR
 * of the current mode to the banked SP of the specified mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    /* User (EL0) and Hyp (EL2) execution of SRS: UNDEF */
    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from the banked SP to the first word stored. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR, at consecutive words. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust addr back to the final SP value for the addressing mode
         * (addr currently points at the second stored word).
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->is_jmp = DISAS_UPDATE;
}
7962
f4df2210 7963static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7964{
f4df2210 7965 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7966 TCGv_i32 tmp;
7967 TCGv_i32 tmp2;
7968 TCGv_i32 tmp3;
7969 TCGv_i32 addr;
a7812ae4 7970 TCGv_i64 tmp64;
9ee6e8bb 7971
9ee6e8bb 7972 /* M variants do not implement ARM mode. */
b53d8923 7973 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 7974 goto illegal_op;
b53d8923 7975 }
9ee6e8bb
PB
7976 cond = insn >> 28;
7977 if (cond == 0xf){
be5e7a76
DES
7978 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7979 * choose to UNDEF. In ARMv5 and above the space is used
7980 * for miscellaneous unconditional instructions.
7981 */
7982 ARCH(5);
7983
9ee6e8bb
PB
7984 /* Unconditional instructions. */
7985 if (((insn >> 25) & 7) == 1) {
7986 /* NEON Data processing. */
d614a513 7987 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7988 goto illegal_op;
d614a513 7989 }
9ee6e8bb 7990
7dcc1f89 7991 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7992 goto illegal_op;
7dcc1f89 7993 }
9ee6e8bb
PB
7994 return;
7995 }
7996 if ((insn & 0x0f100000) == 0x04000000) {
7997 /* NEON load/store. */
d614a513 7998 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7999 goto illegal_op;
d614a513 8000 }
9ee6e8bb 8001
7dcc1f89 8002 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8003 goto illegal_op;
7dcc1f89 8004 }
9ee6e8bb
PB
8005 return;
8006 }
6a57f3eb
WN
8007 if ((insn & 0x0f000e10) == 0x0e000a00) {
8008 /* VFP. */
7dcc1f89 8009 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8010 goto illegal_op;
8011 }
8012 return;
8013 }
3d185e5d
PM
8014 if (((insn & 0x0f30f000) == 0x0510f000) ||
8015 ((insn & 0x0f30f010) == 0x0710f000)) {
8016 if ((insn & (1 << 22)) == 0) {
8017 /* PLDW; v7MP */
d614a513 8018 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8019 goto illegal_op;
8020 }
8021 }
8022 /* Otherwise PLD; v5TE+ */
be5e7a76 8023 ARCH(5TE);
3d185e5d
PM
8024 return;
8025 }
8026 if (((insn & 0x0f70f000) == 0x0450f000) ||
8027 ((insn & 0x0f70f010) == 0x0650f000)) {
8028 ARCH(7);
8029 return; /* PLI; V7 */
8030 }
8031 if (((insn & 0x0f700000) == 0x04100000) ||
8032 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8033 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8034 goto illegal_op;
8035 }
8036 return; /* v7MP: Unallocated memory hint: must NOP */
8037 }
8038
8039 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8040 ARCH(6);
8041 /* setend */
9886ecdf
PB
8042 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8043 gen_helper_setend(cpu_env);
8044 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8045 }
8046 return;
8047 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8048 switch ((insn >> 4) & 0xf) {
8049 case 1: /* clrex */
8050 ARCH(6K);
426f5abc 8051 gen_clrex(s);
9ee6e8bb
PB
8052 return;
8053 case 4: /* dsb */
8054 case 5: /* dmb */
9ee6e8bb
PB
8055 ARCH(7);
8056 /* We don't emulate caches so these are a no-op. */
8057 return;
6df99dec
SS
8058 case 6: /* isb */
8059 /* We need to break the TB after this insn to execute
8060 * self-modifying code correctly and also to take
8061 * any pending interrupts immediately.
8062 */
8063 gen_lookup_tb(s);
8064 return;
9ee6e8bb
PB
8065 default:
8066 goto illegal_op;
8067 }
8068 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8069 /* srs */
81465888
PM
8070 ARCH(6);
8071 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8072 return;
ea825eee 8073 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8074 /* rfe */
c67b6b71 8075 int32_t offset;
9ee6e8bb
PB
8076 if (IS_USER(s))
8077 goto illegal_op;
8078 ARCH(6);
8079 rn = (insn >> 16) & 0xf;
b0109805 8080 addr = load_reg(s, rn);
9ee6e8bb
PB
8081 i = (insn >> 23) & 3;
8082 switch (i) {
b0109805 8083 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8084 case 1: offset = 0; break; /* IA */
8085 case 2: offset = -8; break; /* DB */
b0109805 8086 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8087 default: abort();
8088 }
8089 if (offset)
b0109805
PB
8090 tcg_gen_addi_i32(addr, addr, offset);
8091 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8092 tmp = tcg_temp_new_i32();
12dcc321 8093 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8094 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8095 tmp2 = tcg_temp_new_i32();
12dcc321 8096 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8097 if (insn & (1 << 21)) {
8098 /* Base writeback. */
8099 switch (i) {
b0109805 8100 case 0: offset = -8; break;
c67b6b71
FN
8101 case 1: offset = 4; break;
8102 case 2: offset = -4; break;
b0109805 8103 case 3: offset = 0; break;
9ee6e8bb
PB
8104 default: abort();
8105 }
8106 if (offset)
b0109805
PB
8107 tcg_gen_addi_i32(addr, addr, offset);
8108 store_reg(s, rn, addr);
8109 } else {
7d1b0095 8110 tcg_temp_free_i32(addr);
9ee6e8bb 8111 }
b0109805 8112 gen_rfe(s, tmp, tmp2);
c67b6b71 8113 return;
9ee6e8bb
PB
8114 } else if ((insn & 0x0e000000) == 0x0a000000) {
8115 /* branch link and change to thumb (blx <offset>) */
8116 int32_t offset;
8117
8118 val = (uint32_t)s->pc;
7d1b0095 8119 tmp = tcg_temp_new_i32();
d9ba4830
PB
8120 tcg_gen_movi_i32(tmp, val);
8121 store_reg(s, 14, tmp);
9ee6e8bb
PB
8122 /* Sign-extend the 24-bit offset */
8123 offset = (((int32_t)insn) << 8) >> 8;
8124 /* offset * 4 + bit24 * 2 + (thumb bit) */
8125 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8126 /* pipeline offset */
8127 val += 4;
be5e7a76 8128 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8129 gen_bx_im(s, val);
9ee6e8bb
PB
8130 return;
8131 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8132 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8133 /* iWMMXt register transfer. */
c0f4af17 8134 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8135 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8136 return;
c0f4af17
PM
8137 }
8138 }
9ee6e8bb
PB
8139 }
8140 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8141 /* Coprocessor double register transfer. */
be5e7a76 8142 ARCH(5TE);
9ee6e8bb
PB
8143 } else if ((insn & 0x0f000010) == 0x0e000010) {
8144 /* Additional coprocessor register transfer. */
7997d92f 8145 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8146 uint32_t mask;
8147 uint32_t val;
8148 /* cps (privileged) */
8149 if (IS_USER(s))
8150 return;
8151 mask = val = 0;
8152 if (insn & (1 << 19)) {
8153 if (insn & (1 << 8))
8154 mask |= CPSR_A;
8155 if (insn & (1 << 7))
8156 mask |= CPSR_I;
8157 if (insn & (1 << 6))
8158 mask |= CPSR_F;
8159 if (insn & (1 << 18))
8160 val |= mask;
8161 }
7997d92f 8162 if (insn & (1 << 17)) {
9ee6e8bb
PB
8163 mask |= CPSR_M;
8164 val |= (insn & 0x1f);
8165 }
8166 if (mask) {
2fbac54b 8167 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8168 }
8169 return;
8170 }
8171 goto illegal_op;
8172 }
8173 if (cond != 0xe) {
8174 /* if not always execute, we generate a conditional jump to
8175 next instruction */
8176 s->condlabel = gen_new_label();
39fb730a 8177 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8178 s->condjmp = 1;
8179 }
8180 if ((insn & 0x0f900000) == 0x03000000) {
8181 if ((insn & (1 << 21)) == 0) {
8182 ARCH(6T2);
8183 rd = (insn >> 12) & 0xf;
8184 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8185 if ((insn & (1 << 22)) == 0) {
8186 /* MOVW */
7d1b0095 8187 tmp = tcg_temp_new_i32();
5e3f878a 8188 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8189 } else {
8190 /* MOVT */
5e3f878a 8191 tmp = load_reg(s, rd);
86831435 8192 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8193 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8194 }
5e3f878a 8195 store_reg(s, rd, tmp);
9ee6e8bb
PB
8196 } else {
8197 if (((insn >> 12) & 0xf) != 0xf)
8198 goto illegal_op;
8199 if (((insn >> 16) & 0xf) == 0) {
8200 gen_nop_hint(s, insn & 0xff);
8201 } else {
8202 /* CPSR = immediate */
8203 val = insn & 0xff;
8204 shift = ((insn >> 8) & 0xf) * 2;
8205 if (shift)
8206 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8207 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8208 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8209 i, val)) {
9ee6e8bb 8210 goto illegal_op;
7dcc1f89 8211 }
9ee6e8bb
PB
8212 }
8213 }
8214 } else if ((insn & 0x0f900000) == 0x01000000
8215 && (insn & 0x00000090) != 0x00000090) {
8216 /* miscellaneous instructions */
8217 op1 = (insn >> 21) & 3;
8218 sh = (insn >> 4) & 0xf;
8219 rm = insn & 0xf;
8220 switch (sh) {
8bfd0550
PM
8221 case 0x0: /* MSR, MRS */
8222 if (insn & (1 << 9)) {
8223 /* MSR (banked) and MRS (banked) */
8224 int sysm = extract32(insn, 16, 4) |
8225 (extract32(insn, 8, 1) << 4);
8226 int r = extract32(insn, 22, 1);
8227
8228 if (op1 & 1) {
8229 /* MSR (banked) */
8230 gen_msr_banked(s, r, sysm, rm);
8231 } else {
8232 /* MRS (banked) */
8233 int rd = extract32(insn, 12, 4);
8234
8235 gen_mrs_banked(s, r, sysm, rd);
8236 }
8237 break;
8238 }
8239
8240 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8241 if (op1 & 1) {
8242 /* PSR = reg */
2fbac54b 8243 tmp = load_reg(s, rm);
9ee6e8bb 8244 i = ((op1 & 2) != 0);
7dcc1f89 8245 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8246 goto illegal_op;
8247 } else {
8248 /* reg = PSR */
8249 rd = (insn >> 12) & 0xf;
8250 if (op1 & 2) {
8251 if (IS_USER(s))
8252 goto illegal_op;
d9ba4830 8253 tmp = load_cpu_field(spsr);
9ee6e8bb 8254 } else {
7d1b0095 8255 tmp = tcg_temp_new_i32();
9ef39277 8256 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8257 }
d9ba4830 8258 store_reg(s, rd, tmp);
9ee6e8bb
PB
8259 }
8260 break;
8261 case 0x1:
8262 if (op1 == 1) {
8263 /* branch/exchange thumb (bx). */
be5e7a76 8264 ARCH(4T);
d9ba4830
PB
8265 tmp = load_reg(s, rm);
8266 gen_bx(s, tmp);
9ee6e8bb
PB
8267 } else if (op1 == 3) {
8268 /* clz */
be5e7a76 8269 ARCH(5);
9ee6e8bb 8270 rd = (insn >> 12) & 0xf;
1497c961
PB
8271 tmp = load_reg(s, rm);
8272 gen_helper_clz(tmp, tmp);
8273 store_reg(s, rd, tmp);
9ee6e8bb
PB
8274 } else {
8275 goto illegal_op;
8276 }
8277 break;
8278 case 0x2:
8279 if (op1 == 1) {
8280 ARCH(5J); /* bxj */
8281 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8282 tmp = load_reg(s, rm);
8283 gen_bx(s, tmp);
9ee6e8bb
PB
8284 } else {
8285 goto illegal_op;
8286 }
8287 break;
8288 case 0x3:
8289 if (op1 != 1)
8290 goto illegal_op;
8291
be5e7a76 8292 ARCH(5);
9ee6e8bb 8293 /* branch link/exchange thumb (blx) */
d9ba4830 8294 tmp = load_reg(s, rm);
7d1b0095 8295 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8296 tcg_gen_movi_i32(tmp2, s->pc);
8297 store_reg(s, 14, tmp2);
8298 gen_bx(s, tmp);
9ee6e8bb 8299 break;
eb0ecd5a
WN
8300 case 0x4:
8301 {
8302 /* crc32/crc32c */
8303 uint32_t c = extract32(insn, 8, 4);
8304
8305 /* Check this CPU supports ARMv8 CRC instructions.
8306 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8307 * Bits 8, 10 and 11 should be zero.
8308 */
d614a513 8309 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8310 (c & 0xd) != 0) {
8311 goto illegal_op;
8312 }
8313
8314 rn = extract32(insn, 16, 4);
8315 rd = extract32(insn, 12, 4);
8316
8317 tmp = load_reg(s, rn);
8318 tmp2 = load_reg(s, rm);
aa633469
PM
8319 if (op1 == 0) {
8320 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8321 } else if (op1 == 1) {
8322 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8323 }
eb0ecd5a
WN
8324 tmp3 = tcg_const_i32(1 << op1);
8325 if (c & 0x2) {
8326 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8327 } else {
8328 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8329 }
8330 tcg_temp_free_i32(tmp2);
8331 tcg_temp_free_i32(tmp3);
8332 store_reg(s, rd, tmp);
8333 break;
8334 }
9ee6e8bb 8335 case 0x5: /* saturating add/subtract */
be5e7a76 8336 ARCH(5TE);
9ee6e8bb
PB
8337 rd = (insn >> 12) & 0xf;
8338 rn = (insn >> 16) & 0xf;
b40d0353 8339 tmp = load_reg(s, rm);
5e3f878a 8340 tmp2 = load_reg(s, rn);
9ee6e8bb 8341 if (op1 & 2)
9ef39277 8342 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8343 if (op1 & 1)
9ef39277 8344 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8345 else
9ef39277 8346 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8347 tcg_temp_free_i32(tmp2);
5e3f878a 8348 store_reg(s, rd, tmp);
9ee6e8bb 8349 break;
49e14940 8350 case 7:
d4a2dc67
PM
8351 {
8352 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
8353 switch (op1) {
8354 case 1:
8355 /* bkpt */
8356 ARCH(5);
8357 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8358 syn_aa32_bkpt(imm16, false),
8359 default_exception_el(s));
37e6456e
PM
8360 break;
8361 case 2:
8362 /* Hypervisor call (v7) */
8363 ARCH(7);
8364 if (IS_USER(s)) {
8365 goto illegal_op;
8366 }
8367 gen_hvc(s, imm16);
8368 break;
8369 case 3:
8370 /* Secure monitor call (v6+) */
8371 ARCH(6K);
8372 if (IS_USER(s)) {
8373 goto illegal_op;
8374 }
8375 gen_smc(s);
8376 break;
8377 default:
49e14940
AL
8378 goto illegal_op;
8379 }
9ee6e8bb 8380 break;
d4a2dc67 8381 }
9ee6e8bb
PB
8382 case 0x8: /* signed multiply */
8383 case 0xa:
8384 case 0xc:
8385 case 0xe:
be5e7a76 8386 ARCH(5TE);
9ee6e8bb
PB
8387 rs = (insn >> 8) & 0xf;
8388 rn = (insn >> 12) & 0xf;
8389 rd = (insn >> 16) & 0xf;
8390 if (op1 == 1) {
8391 /* (32 * 16) >> 16 */
5e3f878a
PB
8392 tmp = load_reg(s, rm);
8393 tmp2 = load_reg(s, rs);
9ee6e8bb 8394 if (sh & 4)
5e3f878a 8395 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8396 else
5e3f878a 8397 gen_sxth(tmp2);
a7812ae4
PB
8398 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8399 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8400 tmp = tcg_temp_new_i32();
ecc7b3aa 8401 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8402 tcg_temp_free_i64(tmp64);
9ee6e8bb 8403 if ((sh & 2) == 0) {
5e3f878a 8404 tmp2 = load_reg(s, rn);
9ef39277 8405 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8406 tcg_temp_free_i32(tmp2);
9ee6e8bb 8407 }
5e3f878a 8408 store_reg(s, rd, tmp);
9ee6e8bb
PB
8409 } else {
8410 /* 16 * 16 */
5e3f878a
PB
8411 tmp = load_reg(s, rm);
8412 tmp2 = load_reg(s, rs);
8413 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8414 tcg_temp_free_i32(tmp2);
9ee6e8bb 8415 if (op1 == 2) {
a7812ae4
PB
8416 tmp64 = tcg_temp_new_i64();
8417 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8418 tcg_temp_free_i32(tmp);
a7812ae4
PB
8419 gen_addq(s, tmp64, rn, rd);
8420 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8421 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8422 } else {
8423 if (op1 == 0) {
5e3f878a 8424 tmp2 = load_reg(s, rn);
9ef39277 8425 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8426 tcg_temp_free_i32(tmp2);
9ee6e8bb 8427 }
5e3f878a 8428 store_reg(s, rd, tmp);
9ee6e8bb
PB
8429 }
8430 }
8431 break;
8432 default:
8433 goto illegal_op;
8434 }
8435 } else if (((insn & 0x0e000000) == 0 &&
8436 (insn & 0x00000090) != 0x90) ||
8437 ((insn & 0x0e000000) == (1 << 25))) {
8438 int set_cc, logic_cc, shiftop;
8439
8440 op1 = (insn >> 21) & 0xf;
8441 set_cc = (insn >> 20) & 1;
8442 logic_cc = table_logic_cc[op1] & set_cc;
8443
8444 /* data processing instruction */
8445 if (insn & (1 << 25)) {
8446 /* immediate operand */
8447 val = insn & 0xff;
8448 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8449 if (shift) {
9ee6e8bb 8450 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8451 }
7d1b0095 8452 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8453 tcg_gen_movi_i32(tmp2, val);
8454 if (logic_cc && shift) {
8455 gen_set_CF_bit31(tmp2);
8456 }
9ee6e8bb
PB
8457 } else {
8458 /* register */
8459 rm = (insn) & 0xf;
e9bb4aa9 8460 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8461 shiftop = (insn >> 5) & 3;
8462 if (!(insn & (1 << 4))) {
8463 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8464 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8465 } else {
8466 rs = (insn >> 8) & 0xf;
8984bd2e 8467 tmp = load_reg(s, rs);
e9bb4aa9 8468 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8469 }
8470 }
8471 if (op1 != 0x0f && op1 != 0x0d) {
8472 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8473 tmp = load_reg(s, rn);
8474 } else {
39d5492a 8475 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8476 }
8477 rd = (insn >> 12) & 0xf;
8478 switch(op1) {
8479 case 0x00:
e9bb4aa9
JR
8480 tcg_gen_and_i32(tmp, tmp, tmp2);
8481 if (logic_cc) {
8482 gen_logic_CC(tmp);
8483 }
7dcc1f89 8484 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8485 break;
8486 case 0x01:
e9bb4aa9
JR
8487 tcg_gen_xor_i32(tmp, tmp, tmp2);
8488 if (logic_cc) {
8489 gen_logic_CC(tmp);
8490 }
7dcc1f89 8491 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8492 break;
8493 case 0x02:
8494 if (set_cc && rd == 15) {
8495 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8496 if (IS_USER(s)) {
9ee6e8bb 8497 goto illegal_op;
e9bb4aa9 8498 }
72485ec4 8499 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8500 gen_exception_return(s, tmp);
9ee6e8bb 8501 } else {
e9bb4aa9 8502 if (set_cc) {
72485ec4 8503 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8504 } else {
8505 tcg_gen_sub_i32(tmp, tmp, tmp2);
8506 }
7dcc1f89 8507 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8508 }
8509 break;
8510 case 0x03:
e9bb4aa9 8511 if (set_cc) {
72485ec4 8512 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8513 } else {
8514 tcg_gen_sub_i32(tmp, tmp2, tmp);
8515 }
7dcc1f89 8516 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8517 break;
8518 case 0x04:
e9bb4aa9 8519 if (set_cc) {
72485ec4 8520 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8521 } else {
8522 tcg_gen_add_i32(tmp, tmp, tmp2);
8523 }
7dcc1f89 8524 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8525 break;
8526 case 0x05:
e9bb4aa9 8527 if (set_cc) {
49b4c31e 8528 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8529 } else {
8530 gen_add_carry(tmp, tmp, tmp2);
8531 }
7dcc1f89 8532 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8533 break;
8534 case 0x06:
e9bb4aa9 8535 if (set_cc) {
2de68a49 8536 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8537 } else {
8538 gen_sub_carry(tmp, tmp, tmp2);
8539 }
7dcc1f89 8540 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8541 break;
8542 case 0x07:
e9bb4aa9 8543 if (set_cc) {
2de68a49 8544 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8545 } else {
8546 gen_sub_carry(tmp, tmp2, tmp);
8547 }
7dcc1f89 8548 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8549 break;
8550 case 0x08:
8551 if (set_cc) {
e9bb4aa9
JR
8552 tcg_gen_and_i32(tmp, tmp, tmp2);
8553 gen_logic_CC(tmp);
9ee6e8bb 8554 }
7d1b0095 8555 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8556 break;
8557 case 0x09:
8558 if (set_cc) {
e9bb4aa9
JR
8559 tcg_gen_xor_i32(tmp, tmp, tmp2);
8560 gen_logic_CC(tmp);
9ee6e8bb 8561 }
7d1b0095 8562 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8563 break;
8564 case 0x0a:
8565 if (set_cc) {
72485ec4 8566 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8567 }
7d1b0095 8568 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8569 break;
8570 case 0x0b:
8571 if (set_cc) {
72485ec4 8572 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8573 }
7d1b0095 8574 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8575 break;
8576 case 0x0c:
e9bb4aa9
JR
8577 tcg_gen_or_i32(tmp, tmp, tmp2);
8578 if (logic_cc) {
8579 gen_logic_CC(tmp);
8580 }
7dcc1f89 8581 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8582 break;
8583 case 0x0d:
8584 if (logic_cc && rd == 15) {
8585 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8586 if (IS_USER(s)) {
9ee6e8bb 8587 goto illegal_op;
e9bb4aa9
JR
8588 }
8589 gen_exception_return(s, tmp2);
9ee6e8bb 8590 } else {
e9bb4aa9
JR
8591 if (logic_cc) {
8592 gen_logic_CC(tmp2);
8593 }
7dcc1f89 8594 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8595 }
8596 break;
8597 case 0x0e:
f669df27 8598 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8599 if (logic_cc) {
8600 gen_logic_CC(tmp);
8601 }
7dcc1f89 8602 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8603 break;
8604 default:
8605 case 0x0f:
e9bb4aa9
JR
8606 tcg_gen_not_i32(tmp2, tmp2);
8607 if (logic_cc) {
8608 gen_logic_CC(tmp2);
8609 }
7dcc1f89 8610 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8611 break;
8612 }
e9bb4aa9 8613 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8614 tcg_temp_free_i32(tmp2);
e9bb4aa9 8615 }
9ee6e8bb
PB
8616 } else {
8617 /* other instructions */
8618 op1 = (insn >> 24) & 0xf;
8619 switch(op1) {
8620 case 0x0:
8621 case 0x1:
8622 /* multiplies, extra load/stores */
8623 sh = (insn >> 5) & 3;
8624 if (sh == 0) {
8625 if (op1 == 0x0) {
8626 rd = (insn >> 16) & 0xf;
8627 rn = (insn >> 12) & 0xf;
8628 rs = (insn >> 8) & 0xf;
8629 rm = (insn) & 0xf;
8630 op1 = (insn >> 20) & 0xf;
8631 switch (op1) {
8632 case 0: case 1: case 2: case 3: case 6:
8633 /* 32 bit mul */
5e3f878a
PB
8634 tmp = load_reg(s, rs);
8635 tmp2 = load_reg(s, rm);
8636 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8637 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8638 if (insn & (1 << 22)) {
8639 /* Subtract (mls) */
8640 ARCH(6T2);
5e3f878a
PB
8641 tmp2 = load_reg(s, rn);
8642 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8643 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8644 } else if (insn & (1 << 21)) {
8645 /* Add */
5e3f878a
PB
8646 tmp2 = load_reg(s, rn);
8647 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8648 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8649 }
8650 if (insn & (1 << 20))
5e3f878a
PB
8651 gen_logic_CC(tmp);
8652 store_reg(s, rd, tmp);
9ee6e8bb 8653 break;
8aac08b1
AJ
8654 case 4:
8655 /* 64 bit mul double accumulate (UMAAL) */
8656 ARCH(6);
8657 tmp = load_reg(s, rs);
8658 tmp2 = load_reg(s, rm);
8659 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8660 gen_addq_lo(s, tmp64, rn);
8661 gen_addq_lo(s, tmp64, rd);
8662 gen_storeq_reg(s, rn, rd, tmp64);
8663 tcg_temp_free_i64(tmp64);
8664 break;
8665 case 8: case 9: case 10: case 11:
8666 case 12: case 13: case 14: case 15:
8667 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8668 tmp = load_reg(s, rs);
8669 tmp2 = load_reg(s, rm);
8aac08b1 8670 if (insn & (1 << 22)) {
c9f10124 8671 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8672 } else {
c9f10124 8673 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8674 }
8675 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8676 TCGv_i32 al = load_reg(s, rn);
8677 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8678 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8679 tcg_temp_free_i32(al);
8680 tcg_temp_free_i32(ah);
9ee6e8bb 8681 }
8aac08b1 8682 if (insn & (1 << 20)) {
c9f10124 8683 gen_logicq_cc(tmp, tmp2);
8aac08b1 8684 }
c9f10124
RH
8685 store_reg(s, rn, tmp);
8686 store_reg(s, rd, tmp2);
9ee6e8bb 8687 break;
8aac08b1
AJ
8688 default:
8689 goto illegal_op;
9ee6e8bb
PB
8690 }
8691 } else {
8692 rn = (insn >> 16) & 0xf;
8693 rd = (insn >> 12) & 0xf;
8694 if (insn & (1 << 23)) {
8695 /* load/store exclusive */
2359bf80 8696 int op2 = (insn >> 8) & 3;
86753403 8697 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8698
8699 switch (op2) {
8700 case 0: /* lda/stl */
8701 if (op1 == 1) {
8702 goto illegal_op;
8703 }
8704 ARCH(8);
8705 break;
8706 case 1: /* reserved */
8707 goto illegal_op;
8708 case 2: /* ldaex/stlex */
8709 ARCH(8);
8710 break;
8711 case 3: /* ldrex/strex */
8712 if (op1) {
8713 ARCH(6K);
8714 } else {
8715 ARCH(6);
8716 }
8717 break;
8718 }
8719
3174f8e9 8720 addr = tcg_temp_local_new_i32();
98a46317 8721 load_reg_var(s, addr, rn);
2359bf80
MR
8722
8723 /* Since the emulation does not have barriers,
8724 the acquire/release semantics need no special
8725 handling */
8726 if (op2 == 0) {
8727 if (insn & (1 << 20)) {
8728 tmp = tcg_temp_new_i32();
8729 switch (op1) {
8730 case 0: /* lda */
12dcc321
PB
8731 gen_aa32_ld32u(s, tmp, addr,
8732 get_mem_index(s));
2359bf80
MR
8733 break;
8734 case 2: /* ldab */
12dcc321
PB
8735 gen_aa32_ld8u(s, tmp, addr,
8736 get_mem_index(s));
2359bf80
MR
8737 break;
8738 case 3: /* ldah */
12dcc321
PB
8739 gen_aa32_ld16u(s, tmp, addr,
8740 get_mem_index(s));
2359bf80
MR
8741 break;
8742 default:
8743 abort();
8744 }
8745 store_reg(s, rd, tmp);
8746 } else {
8747 rm = insn & 0xf;
8748 tmp = load_reg(s, rm);
8749 switch (op1) {
8750 case 0: /* stl */
12dcc321
PB
8751 gen_aa32_st32(s, tmp, addr,
8752 get_mem_index(s));
2359bf80
MR
8753 break;
8754 case 2: /* stlb */
12dcc321
PB
8755 gen_aa32_st8(s, tmp, addr,
8756 get_mem_index(s));
2359bf80
MR
8757 break;
8758 case 3: /* stlh */
12dcc321
PB
8759 gen_aa32_st16(s, tmp, addr,
8760 get_mem_index(s));
2359bf80
MR
8761 break;
8762 default:
8763 abort();
8764 }
8765 tcg_temp_free_i32(tmp);
8766 }
8767 } else if (insn & (1 << 20)) {
86753403
PB
8768 switch (op1) {
8769 case 0: /* ldrex */
426f5abc 8770 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8771 break;
8772 case 1: /* ldrexd */
426f5abc 8773 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8774 break;
8775 case 2: /* ldrexb */
426f5abc 8776 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8777 break;
8778 case 3: /* ldrexh */
426f5abc 8779 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8780 break;
8781 default:
8782 abort();
8783 }
9ee6e8bb
PB
8784 } else {
8785 rm = insn & 0xf;
86753403
PB
8786 switch (op1) {
8787 case 0: /* strex */
426f5abc 8788 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8789 break;
8790 case 1: /* strexd */
502e64fe 8791 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8792 break;
8793 case 2: /* strexb */
426f5abc 8794 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8795 break;
8796 case 3: /* strexh */
426f5abc 8797 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8798 break;
8799 default:
8800 abort();
8801 }
9ee6e8bb 8802 }
39d5492a 8803 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8804 } else {
8805 /* SWP instruction */
8806 rm = (insn) & 0xf;
8807
8984bd2e
PB
8808 /* ??? This is not really atomic. However we know
8809 we never have multiple CPUs running in parallel,
8810 so it is good enough. */
8811 addr = load_reg(s, rn);
8812 tmp = load_reg(s, rm);
5a839c0d 8813 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8814 if (insn & (1 << 22)) {
12dcc321
PB
8815 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8816 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8817 } else {
12dcc321
PB
8818 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8819 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8820 }
5a839c0d 8821 tcg_temp_free_i32(tmp);
7d1b0095 8822 tcg_temp_free_i32(addr);
8984bd2e 8823 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8824 }
8825 }
8826 } else {
8827 int address_offset;
3960c336
PM
8828 bool load = insn & (1 << 20);
8829 bool doubleword = false;
9ee6e8bb
PB
8830 /* Misc load/store */
8831 rn = (insn >> 16) & 0xf;
8832 rd = (insn >> 12) & 0xf;
3960c336
PM
8833
8834 if (!load && (sh & 2)) {
8835 /* doubleword */
8836 ARCH(5TE);
8837 if (rd & 1) {
8838 /* UNPREDICTABLE; we choose to UNDEF */
8839 goto illegal_op;
8840 }
8841 load = (sh & 1) == 0;
8842 doubleword = true;
8843 }
8844
b0109805 8845 addr = load_reg(s, rn);
9ee6e8bb 8846 if (insn & (1 << 24))
b0109805 8847 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8848 address_offset = 0;
3960c336
PM
8849
8850 if (doubleword) {
8851 if (!load) {
9ee6e8bb 8852 /* store */
b0109805 8853 tmp = load_reg(s, rd);
12dcc321 8854 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8855 tcg_temp_free_i32(tmp);
b0109805
PB
8856 tcg_gen_addi_i32(addr, addr, 4);
8857 tmp = load_reg(s, rd + 1);
12dcc321 8858 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8859 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8860 } else {
8861 /* load */
5a839c0d 8862 tmp = tcg_temp_new_i32();
12dcc321 8863 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8864 store_reg(s, rd, tmp);
8865 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8866 tmp = tcg_temp_new_i32();
12dcc321 8867 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8868 rd++;
9ee6e8bb
PB
8869 }
8870 address_offset = -4;
3960c336
PM
8871 } else if (load) {
8872 /* load */
8873 tmp = tcg_temp_new_i32();
8874 switch (sh) {
8875 case 1:
12dcc321 8876 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
3960c336
PM
8877 break;
8878 case 2:
12dcc321 8879 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8880 break;
8881 default:
8882 case 3:
12dcc321 8883 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8884 break;
8885 }
9ee6e8bb
PB
8886 } else {
8887 /* store */
b0109805 8888 tmp = load_reg(s, rd);
12dcc321 8889 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5a839c0d 8890 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8891 }
8892 /* Perform base writeback before the loaded value to
8893 ensure correct behavior with overlapping index registers.
b6af0975 8894 ldrd with base writeback is undefined if the
9ee6e8bb
PB
8895 destination and index registers overlap. */
8896 if (!(insn & (1 << 24))) {
b0109805
PB
8897 gen_add_datah_offset(s, insn, address_offset, addr);
8898 store_reg(s, rn, addr);
9ee6e8bb
PB
8899 } else if (insn & (1 << 21)) {
8900 if (address_offset)
b0109805
PB
8901 tcg_gen_addi_i32(addr, addr, address_offset);
8902 store_reg(s, rn, addr);
8903 } else {
7d1b0095 8904 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8905 }
8906 if (load) {
8907 /* Complete the load. */
b0109805 8908 store_reg(s, rd, tmp);
9ee6e8bb
PB
8909 }
8910 }
8911 break;
8912 case 0x4:
8913 case 0x5:
8914 goto do_ldst;
8915 case 0x6:
8916 case 0x7:
8917 if (insn & (1 << 4)) {
8918 ARCH(6);
8919 /* Armv6 Media instructions. */
8920 rm = insn & 0xf;
8921 rn = (insn >> 16) & 0xf;
2c0262af 8922 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8923 rs = (insn >> 8) & 0xf;
8924 switch ((insn >> 23) & 3) {
8925 case 0: /* Parallel add/subtract. */
8926 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8927 tmp = load_reg(s, rn);
8928 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8929 sh = (insn >> 5) & 7;
8930 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8931 goto illegal_op;
6ddbc6e4 8932 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8933 tcg_temp_free_i32(tmp2);
6ddbc6e4 8934 store_reg(s, rd, tmp);
9ee6e8bb
PB
8935 break;
8936 case 1:
8937 if ((insn & 0x00700020) == 0) {
6c95676b 8938 /* Halfword pack. */
3670669c
PB
8939 tmp = load_reg(s, rn);
8940 tmp2 = load_reg(s, rm);
9ee6e8bb 8941 shift = (insn >> 7) & 0x1f;
3670669c
PB
8942 if (insn & (1 << 6)) {
8943 /* pkhtb */
22478e79
AZ
8944 if (shift == 0)
8945 shift = 31;
8946 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8947 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8948 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8949 } else {
8950 /* pkhbt */
22478e79
AZ
8951 if (shift)
8952 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8953 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8954 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8955 }
8956 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8957 tcg_temp_free_i32(tmp2);
3670669c 8958 store_reg(s, rd, tmp);
9ee6e8bb
PB
8959 } else if ((insn & 0x00200020) == 0x00200000) {
8960 /* [us]sat */
6ddbc6e4 8961 tmp = load_reg(s, rm);
9ee6e8bb
PB
8962 shift = (insn >> 7) & 0x1f;
8963 if (insn & (1 << 6)) {
8964 if (shift == 0)
8965 shift = 31;
6ddbc6e4 8966 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8967 } else {
6ddbc6e4 8968 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8969 }
8970 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8971 tmp2 = tcg_const_i32(sh);
8972 if (insn & (1 << 22))
9ef39277 8973 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8974 else
9ef39277 8975 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8976 tcg_temp_free_i32(tmp2);
6ddbc6e4 8977 store_reg(s, rd, tmp);
9ee6e8bb
PB
8978 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8979 /* [us]sat16 */
6ddbc6e4 8980 tmp = load_reg(s, rm);
9ee6e8bb 8981 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8982 tmp2 = tcg_const_i32(sh);
8983 if (insn & (1 << 22))
9ef39277 8984 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8985 else
9ef39277 8986 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8987 tcg_temp_free_i32(tmp2);
6ddbc6e4 8988 store_reg(s, rd, tmp);
9ee6e8bb
PB
8989 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8990 /* Select bytes. */
6ddbc6e4
PB
8991 tmp = load_reg(s, rn);
8992 tmp2 = load_reg(s, rm);
7d1b0095 8993 tmp3 = tcg_temp_new_i32();
0ecb72a5 8994 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8995 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8996 tcg_temp_free_i32(tmp3);
8997 tcg_temp_free_i32(tmp2);
6ddbc6e4 8998 store_reg(s, rd, tmp);
9ee6e8bb 8999 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9000 tmp = load_reg(s, rm);
9ee6e8bb 9001 shift = (insn >> 10) & 3;
1301f322 9002 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9003 rotate, a shift is sufficient. */
9004 if (shift != 0)
f669df27 9005 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9006 op1 = (insn >> 20) & 7;
9007 switch (op1) {
5e3f878a
PB
9008 case 0: gen_sxtb16(tmp); break;
9009 case 2: gen_sxtb(tmp); break;
9010 case 3: gen_sxth(tmp); break;
9011 case 4: gen_uxtb16(tmp); break;
9012 case 6: gen_uxtb(tmp); break;
9013 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9014 default: goto illegal_op;
9015 }
9016 if (rn != 15) {
5e3f878a 9017 tmp2 = load_reg(s, rn);
9ee6e8bb 9018 if ((op1 & 3) == 0) {
5e3f878a 9019 gen_add16(tmp, tmp2);
9ee6e8bb 9020 } else {
5e3f878a 9021 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9022 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9023 }
9024 }
6c95676b 9025 store_reg(s, rd, tmp);
9ee6e8bb
PB
9026 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9027 /* rev */
b0109805 9028 tmp = load_reg(s, rm);
9ee6e8bb
PB
9029 if (insn & (1 << 22)) {
9030 if (insn & (1 << 7)) {
b0109805 9031 gen_revsh(tmp);
9ee6e8bb
PB
9032 } else {
9033 ARCH(6T2);
b0109805 9034 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9035 }
9036 } else {
9037 if (insn & (1 << 7))
b0109805 9038 gen_rev16(tmp);
9ee6e8bb 9039 else
66896cb8 9040 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9041 }
b0109805 9042 store_reg(s, rd, tmp);
9ee6e8bb
PB
9043 } else {
9044 goto illegal_op;
9045 }
9046 break;
9047 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9048 switch ((insn >> 20) & 0x7) {
9049 case 5:
9050 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9051 /* op2 not 00x or 11x : UNDEF */
9052 goto illegal_op;
9053 }
838fa72d
AJ
9054 /* Signed multiply most significant [accumulate].
9055 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9056 tmp = load_reg(s, rm);
9057 tmp2 = load_reg(s, rs);
a7812ae4 9058 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9059
955a7dd5 9060 if (rd != 15) {
838fa72d 9061 tmp = load_reg(s, rd);
9ee6e8bb 9062 if (insn & (1 << 6)) {
838fa72d 9063 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9064 } else {
838fa72d 9065 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9066 }
9067 }
838fa72d
AJ
9068 if (insn & (1 << 5)) {
9069 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9070 }
9071 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9072 tmp = tcg_temp_new_i32();
ecc7b3aa 9073 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9074 tcg_temp_free_i64(tmp64);
955a7dd5 9075 store_reg(s, rn, tmp);
41e9564d
PM
9076 break;
9077 case 0:
9078 case 4:
9079 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9080 if (insn & (1 << 7)) {
9081 goto illegal_op;
9082 }
9083 tmp = load_reg(s, rm);
9084 tmp2 = load_reg(s, rs);
9ee6e8bb 9085 if (insn & (1 << 5))
5e3f878a
PB
9086 gen_swap_half(tmp2);
9087 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9088 if (insn & (1 << 22)) {
5e3f878a 9089 /* smlald, smlsld */
33bbd75a
PC
9090 TCGv_i64 tmp64_2;
9091
a7812ae4 9092 tmp64 = tcg_temp_new_i64();
33bbd75a 9093 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9094 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9095 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9096 tcg_temp_free_i32(tmp);
33bbd75a
PC
9097 tcg_temp_free_i32(tmp2);
9098 if (insn & (1 << 6)) {
9099 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9100 } else {
9101 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9102 }
9103 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9104 gen_addq(s, tmp64, rd, rn);
9105 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9106 tcg_temp_free_i64(tmp64);
9ee6e8bb 9107 } else {
5e3f878a 9108 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9109 if (insn & (1 << 6)) {
9110 /* This subtraction cannot overflow. */
9111 tcg_gen_sub_i32(tmp, tmp, tmp2);
9112 } else {
9113 /* This addition cannot overflow 32 bits;
9114 * however it may overflow considered as a
9115 * signed operation, in which case we must set
9116 * the Q flag.
9117 */
9118 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9119 }
9120 tcg_temp_free_i32(tmp2);
22478e79 9121 if (rd != 15)
9ee6e8bb 9122 {
22478e79 9123 tmp2 = load_reg(s, rd);
9ef39277 9124 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9125 tcg_temp_free_i32(tmp2);
9ee6e8bb 9126 }
22478e79 9127 store_reg(s, rn, tmp);
9ee6e8bb 9128 }
41e9564d 9129 break;
b8b8ea05
PM
9130 case 1:
9131 case 3:
9132 /* SDIV, UDIV */
d614a513 9133 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9134 goto illegal_op;
9135 }
9136 if (((insn >> 5) & 7) || (rd != 15)) {
9137 goto illegal_op;
9138 }
9139 tmp = load_reg(s, rm);
9140 tmp2 = load_reg(s, rs);
9141 if (insn & (1 << 21)) {
9142 gen_helper_udiv(tmp, tmp, tmp2);
9143 } else {
9144 gen_helper_sdiv(tmp, tmp, tmp2);
9145 }
9146 tcg_temp_free_i32(tmp2);
9147 store_reg(s, rn, tmp);
9148 break;
41e9564d
PM
9149 default:
9150 goto illegal_op;
9ee6e8bb
PB
9151 }
9152 break;
9153 case 3:
9154 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9155 switch (op1) {
9156 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9157 ARCH(6);
9158 tmp = load_reg(s, rm);
9159 tmp2 = load_reg(s, rs);
9160 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9161 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9162 if (rd != 15) {
9163 tmp2 = load_reg(s, rd);
6ddbc6e4 9164 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9165 tcg_temp_free_i32(tmp2);
9ee6e8bb 9166 }
ded9d295 9167 store_reg(s, rn, tmp);
9ee6e8bb
PB
9168 break;
9169 case 0x20: case 0x24: case 0x28: case 0x2c:
9170 /* Bitfield insert/clear. */
9171 ARCH(6T2);
9172 shift = (insn >> 7) & 0x1f;
9173 i = (insn >> 16) & 0x1f;
45140a57
KB
9174 if (i < shift) {
9175 /* UNPREDICTABLE; we choose to UNDEF */
9176 goto illegal_op;
9177 }
9ee6e8bb
PB
9178 i = i + 1 - shift;
9179 if (rm == 15) {
7d1b0095 9180 tmp = tcg_temp_new_i32();
5e3f878a 9181 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9182 } else {
5e3f878a 9183 tmp = load_reg(s, rm);
9ee6e8bb
PB
9184 }
9185 if (i != 32) {
5e3f878a 9186 tmp2 = load_reg(s, rd);
d593c48e 9187 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9188 tcg_temp_free_i32(tmp2);
9ee6e8bb 9189 }
5e3f878a 9190 store_reg(s, rd, tmp);
9ee6e8bb
PB
9191 break;
9192 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9193 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9194 ARCH(6T2);
5e3f878a 9195 tmp = load_reg(s, rm);
9ee6e8bb
PB
9196 shift = (insn >> 7) & 0x1f;
9197 i = ((insn >> 16) & 0x1f) + 1;
9198 if (shift + i > 32)
9199 goto illegal_op;
9200 if (i < 32) {
9201 if (op1 & 0x20) {
5e3f878a 9202 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 9203 } else {
5e3f878a 9204 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
9205 }
9206 }
5e3f878a 9207 store_reg(s, rd, tmp);
9ee6e8bb
PB
9208 break;
9209 default:
9210 goto illegal_op;
9211 }
9212 break;
9213 }
9214 break;
9215 }
9216 do_ldst:
9217 /* Check for undefined extension instructions
9218 * per the ARM Bible IE:
9219 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9220 */
9221 sh = (0xf << 20) | (0xf << 4);
9222 if (op1 == 0x7 && ((insn & sh) == sh))
9223 {
9224 goto illegal_op;
9225 }
9226 /* load/store byte/word */
9227 rn = (insn >> 16) & 0xf;
9228 rd = (insn >> 12) & 0xf;
b0109805 9229 tmp2 = load_reg(s, rn);
a99caa48
PM
9230 if ((insn & 0x01200000) == 0x00200000) {
9231 /* ldrt/strt */
579d21cc 9232 i = get_a32_user_mem_index(s);
a99caa48
PM
9233 } else {
9234 i = get_mem_index(s);
9235 }
9ee6e8bb 9236 if (insn & (1 << 24))
b0109805 9237 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9238 if (insn & (1 << 20)) {
9239 /* load */
5a839c0d 9240 tmp = tcg_temp_new_i32();
9ee6e8bb 9241 if (insn & (1 << 22)) {
12dcc321 9242 gen_aa32_ld8u(s, tmp, tmp2, i);
9ee6e8bb 9243 } else {
12dcc321 9244 gen_aa32_ld32u(s, tmp, tmp2, i);
9ee6e8bb 9245 }
9ee6e8bb
PB
9246 } else {
9247 /* store */
b0109805 9248 tmp = load_reg(s, rd);
5a839c0d 9249 if (insn & (1 << 22)) {
12dcc321 9250 gen_aa32_st8(s, tmp, tmp2, i);
5a839c0d 9251 } else {
12dcc321 9252 gen_aa32_st32(s, tmp, tmp2, i);
5a839c0d
PM
9253 }
9254 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9255 }
9256 if (!(insn & (1 << 24))) {
b0109805
PB
9257 gen_add_data_offset(s, insn, tmp2);
9258 store_reg(s, rn, tmp2);
9259 } else if (insn & (1 << 21)) {
9260 store_reg(s, rn, tmp2);
9261 } else {
7d1b0095 9262 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9263 }
9264 if (insn & (1 << 20)) {
9265 /* Complete the load. */
7dcc1f89 9266 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9267 }
9268 break;
9269 case 0x08:
9270 case 0x09:
9271 {
da3e53dd
PM
9272 int j, n, loaded_base;
9273 bool exc_return = false;
9274 bool is_load = extract32(insn, 20, 1);
9275 bool user = false;
39d5492a 9276 TCGv_i32 loaded_var;
9ee6e8bb
PB
9277 /* load/store multiple words */
9278 /* XXX: store correct base if write back */
9ee6e8bb 9279 if (insn & (1 << 22)) {
da3e53dd 9280 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9281 if (IS_USER(s))
9282 goto illegal_op; /* only usable in supervisor mode */
9283
da3e53dd
PM
9284 if (is_load && extract32(insn, 15, 1)) {
9285 exc_return = true;
9286 } else {
9287 user = true;
9288 }
9ee6e8bb
PB
9289 }
9290 rn = (insn >> 16) & 0xf;
b0109805 9291 addr = load_reg(s, rn);
9ee6e8bb
PB
9292
9293 /* compute total size */
9294 loaded_base = 0;
39d5492a 9295 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9296 n = 0;
9297 for(i=0;i<16;i++) {
9298 if (insn & (1 << i))
9299 n++;
9300 }
9301 /* XXX: test invalid n == 0 case ? */
9302 if (insn & (1 << 23)) {
9303 if (insn & (1 << 24)) {
9304 /* pre increment */
b0109805 9305 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9306 } else {
9307 /* post increment */
9308 }
9309 } else {
9310 if (insn & (1 << 24)) {
9311 /* pre decrement */
b0109805 9312 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9313 } else {
9314 /* post decrement */
9315 if (n != 1)
b0109805 9316 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9317 }
9318 }
9319 j = 0;
9320 for(i=0;i<16;i++) {
9321 if (insn & (1 << i)) {
da3e53dd 9322 if (is_load) {
9ee6e8bb 9323 /* load */
5a839c0d 9324 tmp = tcg_temp_new_i32();
12dcc321 9325 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9326 if (user) {
b75263d6 9327 tmp2 = tcg_const_i32(i);
1ce94f81 9328 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9329 tcg_temp_free_i32(tmp2);
7d1b0095 9330 tcg_temp_free_i32(tmp);
9ee6e8bb 9331 } else if (i == rn) {
b0109805 9332 loaded_var = tmp;
9ee6e8bb
PB
9333 loaded_base = 1;
9334 } else {
7dcc1f89 9335 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9336 }
9337 } else {
9338 /* store */
9339 if (i == 15) {
9340 /* special case: r15 = PC + 8 */
9341 val = (long)s->pc + 4;
7d1b0095 9342 tmp = tcg_temp_new_i32();
b0109805 9343 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9344 } else if (user) {
7d1b0095 9345 tmp = tcg_temp_new_i32();
b75263d6 9346 tmp2 = tcg_const_i32(i);
9ef39277 9347 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9348 tcg_temp_free_i32(tmp2);
9ee6e8bb 9349 } else {
b0109805 9350 tmp = load_reg(s, i);
9ee6e8bb 9351 }
12dcc321 9352 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9353 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9354 }
9355 j++;
9356 /* no need to add after the last transfer */
9357 if (j != n)
b0109805 9358 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9359 }
9360 }
9361 if (insn & (1 << 21)) {
9362 /* write back */
9363 if (insn & (1 << 23)) {
9364 if (insn & (1 << 24)) {
9365 /* pre increment */
9366 } else {
9367 /* post increment */
b0109805 9368 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9369 }
9370 } else {
9371 if (insn & (1 << 24)) {
9372 /* pre decrement */
9373 if (n != 1)
b0109805 9374 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9375 } else {
9376 /* post decrement */
b0109805 9377 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9378 }
9379 }
b0109805
PB
9380 store_reg(s, rn, addr);
9381 } else {
7d1b0095 9382 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9383 }
9384 if (loaded_base) {
b0109805 9385 store_reg(s, rn, loaded_var);
9ee6e8bb 9386 }
da3e53dd 9387 if (exc_return) {
9ee6e8bb 9388 /* Restore CPSR from SPSR. */
d9ba4830 9389 tmp = load_cpu_field(spsr);
235ea1f5 9390 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9391 tcg_temp_free_i32(tmp);
577bf808 9392 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9393 }
9394 }
9395 break;
9396 case 0xa:
9397 case 0xb:
9398 {
9399 int32_t offset;
9400
9401 /* branch (and link) */
9402 val = (int32_t)s->pc;
9403 if (insn & (1 << 24)) {
7d1b0095 9404 tmp = tcg_temp_new_i32();
5e3f878a
PB
9405 tcg_gen_movi_i32(tmp, val);
9406 store_reg(s, 14, tmp);
9ee6e8bb 9407 }
534df156
PM
9408 offset = sextract32(insn << 2, 0, 26);
9409 val += offset + 4;
9ee6e8bb
PB
9410 gen_jmp(s, val);
9411 }
9412 break;
9413 case 0xc:
9414 case 0xd:
9415 case 0xe:
6a57f3eb
WN
9416 if (((insn >> 8) & 0xe) == 10) {
9417 /* VFP. */
7dcc1f89 9418 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9419 goto illegal_op;
9420 }
7dcc1f89 9421 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9422 /* Coprocessor. */
9ee6e8bb 9423 goto illegal_op;
6a57f3eb 9424 }
9ee6e8bb
PB
9425 break;
9426 case 0xf:
9427 /* swi */
eaed129d 9428 gen_set_pc_im(s, s->pc);
d4a2dc67 9429 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9430 s->is_jmp = DISAS_SWI;
9431 break;
9432 default:
9433 illegal_op:
73710361
GB
9434 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9435 default_exception_el(s));
9ee6e8bb
PB
9436 break;
9437 }
9438 }
9439}
9440
/* Thumb-2 data-processing opcodes 0..7 are the logical operations
 * (AND, BIC, ORR, ORN, EOR, ...); report nonzero for those.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
9447
9448/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9449 then set condition code flags based on the result of the operation.
9450 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9451 to the high bit of T1.
9452 Returns zero if the opcode is valid. */
9453
9454static int
39d5492a
PM
9455gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9456 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9457{
9458 int logic_cc;
9459
9460 logic_cc = 0;
9461 switch (op) {
9462 case 0: /* and */
396e467c 9463 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9464 logic_cc = conds;
9465 break;
9466 case 1: /* bic */
f669df27 9467 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9468 logic_cc = conds;
9469 break;
9470 case 2: /* orr */
396e467c 9471 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9472 logic_cc = conds;
9473 break;
9474 case 3: /* orn */
29501f1b 9475 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9476 logic_cc = conds;
9477 break;
9478 case 4: /* eor */
396e467c 9479 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9480 logic_cc = conds;
9481 break;
9482 case 8: /* add */
9483 if (conds)
72485ec4 9484 gen_add_CC(t0, t0, t1);
9ee6e8bb 9485 else
396e467c 9486 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9487 break;
9488 case 10: /* adc */
9489 if (conds)
49b4c31e 9490 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9491 else
396e467c 9492 gen_adc(t0, t1);
9ee6e8bb
PB
9493 break;
9494 case 11: /* sbc */
2de68a49
RH
9495 if (conds) {
9496 gen_sbc_CC(t0, t0, t1);
9497 } else {
396e467c 9498 gen_sub_carry(t0, t0, t1);
2de68a49 9499 }
9ee6e8bb
PB
9500 break;
9501 case 13: /* sub */
9502 if (conds)
72485ec4 9503 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9504 else
396e467c 9505 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9506 break;
9507 case 14: /* rsb */
9508 if (conds)
72485ec4 9509 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9510 else
396e467c 9511 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9512 break;
9513 default: /* 5, 6, 7, 9, 12, 15. */
9514 return 1;
9515 }
9516 if (logic_cc) {
396e467c 9517 gen_logic_CC(t0);
9ee6e8bb 9518 if (shifter_out)
396e467c 9519 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9520 }
9521 return 0;
9522}
9523
9524/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9525 is not legal. */
0ecb72a5 9526static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9527{
b0109805 9528 uint32_t insn, imm, shift, offset;
9ee6e8bb 9529 uint32_t rd, rn, rm, rs;
39d5492a
PM
9530 TCGv_i32 tmp;
9531 TCGv_i32 tmp2;
9532 TCGv_i32 tmp3;
9533 TCGv_i32 addr;
a7812ae4 9534 TCGv_i64 tmp64;
9ee6e8bb
PB
9535 int op;
9536 int shiftop;
9537 int conds;
9538 int logic_cc;
9539
d614a513
PM
9540 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9541 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9542 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9543 16-bit instructions to get correct prefetch abort behavior. */
9544 insn = insn_hw1;
9545 if ((insn & (1 << 12)) == 0) {
be5e7a76 9546 ARCH(5);
9ee6e8bb
PB
9547 /* Second half of blx. */
9548 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9549 tmp = load_reg(s, 14);
9550 tcg_gen_addi_i32(tmp, tmp, offset);
9551 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9552
7d1b0095 9553 tmp2 = tcg_temp_new_i32();
b0109805 9554 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9555 store_reg(s, 14, tmp2);
9556 gen_bx(s, tmp);
9ee6e8bb
PB
9557 return 0;
9558 }
9559 if (insn & (1 << 11)) {
9560 /* Second half of bl. */
9561 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9562 tmp = load_reg(s, 14);
6a0d8a1d 9563 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9564
7d1b0095 9565 tmp2 = tcg_temp_new_i32();
b0109805 9566 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9567 store_reg(s, 14, tmp2);
9568 gen_bx(s, tmp);
9ee6e8bb
PB
9569 return 0;
9570 }
9571 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9572 /* Instruction spans a page boundary. Implement it as two
9573 16-bit instructions in case the second half causes an
9574 prefetch abort. */
9575 offset = ((int32_t)insn << 21) >> 9;
396e467c 9576 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9577 return 0;
9578 }
9579 /* Fall through to 32-bit decode. */
9580 }
9581
f9fd40eb 9582 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9583 s->pc += 2;
9584 insn |= (uint32_t)insn_hw1 << 16;
9585
9586 if ((insn & 0xf800e800) != 0xf000e800) {
9587 ARCH(6T2);
9588 }
9589
9590 rn = (insn >> 16) & 0xf;
9591 rs = (insn >> 12) & 0xf;
9592 rd = (insn >> 8) & 0xf;
9593 rm = insn & 0xf;
9594 switch ((insn >> 25) & 0xf) {
9595 case 0: case 1: case 2: case 3:
9596 /* 16-bit instructions. Should never happen. */
9597 abort();
9598 case 4:
9599 if (insn & (1 << 22)) {
9600 /* Other load/store, table branch. */
9601 if (insn & 0x01200000) {
9602 /* Load/store doubleword. */
9603 if (rn == 15) {
7d1b0095 9604 addr = tcg_temp_new_i32();
b0109805 9605 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9606 } else {
b0109805 9607 addr = load_reg(s, rn);
9ee6e8bb
PB
9608 }
9609 offset = (insn & 0xff) * 4;
9610 if ((insn & (1 << 23)) == 0)
9611 offset = -offset;
9612 if (insn & (1 << 24)) {
b0109805 9613 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9614 offset = 0;
9615 }
9616 if (insn & (1 << 20)) {
9617 /* ldrd */
e2592fad 9618 tmp = tcg_temp_new_i32();
12dcc321 9619 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9620 store_reg(s, rs, tmp);
9621 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9622 tmp = tcg_temp_new_i32();
12dcc321 9623 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9624 store_reg(s, rd, tmp);
9ee6e8bb
PB
9625 } else {
9626 /* strd */
b0109805 9627 tmp = load_reg(s, rs);
12dcc321 9628 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9629 tcg_temp_free_i32(tmp);
b0109805
PB
9630 tcg_gen_addi_i32(addr, addr, 4);
9631 tmp = load_reg(s, rd);
12dcc321 9632 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9633 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9634 }
9635 if (insn & (1 << 21)) {
9636 /* Base writeback. */
9637 if (rn == 15)
9638 goto illegal_op;
b0109805
PB
9639 tcg_gen_addi_i32(addr, addr, offset - 4);
9640 store_reg(s, rn, addr);
9641 } else {
7d1b0095 9642 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9643 }
9644 } else if ((insn & (1 << 23)) == 0) {
9645 /* Load/store exclusive word. */
39d5492a 9646 addr = tcg_temp_local_new_i32();
98a46317 9647 load_reg_var(s, addr, rn);
426f5abc 9648 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9649 if (insn & (1 << 20)) {
426f5abc 9650 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9651 } else {
426f5abc 9652 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9653 }
39d5492a 9654 tcg_temp_free_i32(addr);
2359bf80 9655 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9656 /* Table Branch. */
9657 if (rn == 15) {
7d1b0095 9658 addr = tcg_temp_new_i32();
b0109805 9659 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9660 } else {
b0109805 9661 addr = load_reg(s, rn);
9ee6e8bb 9662 }
b26eefb6 9663 tmp = load_reg(s, rm);
b0109805 9664 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9665 if (insn & (1 << 4)) {
9666 /* tbh */
b0109805 9667 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9668 tcg_temp_free_i32(tmp);
e2592fad 9669 tmp = tcg_temp_new_i32();
12dcc321 9670 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9671 } else { /* tbb */
7d1b0095 9672 tcg_temp_free_i32(tmp);
e2592fad 9673 tmp = tcg_temp_new_i32();
12dcc321 9674 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9675 }
7d1b0095 9676 tcg_temp_free_i32(addr);
b0109805
PB
9677 tcg_gen_shli_i32(tmp, tmp, 1);
9678 tcg_gen_addi_i32(tmp, tmp, s->pc);
9679 store_reg(s, 15, tmp);
9ee6e8bb 9680 } else {
2359bf80 9681 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9682 op = (insn >> 4) & 0x3;
2359bf80
MR
9683 switch (op2) {
9684 case 0:
426f5abc 9685 goto illegal_op;
2359bf80
MR
9686 case 1:
9687 /* Load/store exclusive byte/halfword/doubleword */
9688 if (op == 2) {
9689 goto illegal_op;
9690 }
9691 ARCH(7);
9692 break;
9693 case 2:
9694 /* Load-acquire/store-release */
9695 if (op == 3) {
9696 goto illegal_op;
9697 }
9698 /* Fall through */
9699 case 3:
9700 /* Load-acquire/store-release exclusive */
9701 ARCH(8);
9702 break;
426f5abc 9703 }
39d5492a 9704 addr = tcg_temp_local_new_i32();
98a46317 9705 load_reg_var(s, addr, rn);
2359bf80
MR
9706 if (!(op2 & 1)) {
9707 if (insn & (1 << 20)) {
9708 tmp = tcg_temp_new_i32();
9709 switch (op) {
9710 case 0: /* ldab */
12dcc321 9711 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9712 break;
9713 case 1: /* ldah */
12dcc321 9714 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9715 break;
9716 case 2: /* lda */
12dcc321 9717 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9718 break;
9719 default:
9720 abort();
9721 }
9722 store_reg(s, rs, tmp);
9723 } else {
9724 tmp = load_reg(s, rs);
9725 switch (op) {
9726 case 0: /* stlb */
12dcc321 9727 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9728 break;
9729 case 1: /* stlh */
12dcc321 9730 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9731 break;
9732 case 2: /* stl */
12dcc321 9733 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9734 break;
9735 default:
9736 abort();
9737 }
9738 tcg_temp_free_i32(tmp);
9739 }
9740 } else if (insn & (1 << 20)) {
426f5abc 9741 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9742 } else {
426f5abc 9743 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9744 }
39d5492a 9745 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9746 }
9747 } else {
9748 /* Load/store multiple, RFE, SRS. */
9749 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9750 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9751 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9752 goto illegal_op;
00115976 9753 }
9ee6e8bb
PB
9754 if (insn & (1 << 20)) {
9755 /* rfe */
b0109805
PB
9756 addr = load_reg(s, rn);
9757 if ((insn & (1 << 24)) == 0)
9758 tcg_gen_addi_i32(addr, addr, -8);
9759 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9760 tmp = tcg_temp_new_i32();
12dcc321 9761 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9762 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9763 tmp2 = tcg_temp_new_i32();
12dcc321 9764 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9765 if (insn & (1 << 21)) {
9766 /* Base writeback. */
b0109805
PB
9767 if (insn & (1 << 24)) {
9768 tcg_gen_addi_i32(addr, addr, 4);
9769 } else {
9770 tcg_gen_addi_i32(addr, addr, -4);
9771 }
9772 store_reg(s, rn, addr);
9773 } else {
7d1b0095 9774 tcg_temp_free_i32(addr);
9ee6e8bb 9775 }
b0109805 9776 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9777 } else {
9778 /* srs */
81465888
PM
9779 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9780 insn & (1 << 21));
9ee6e8bb
PB
9781 }
9782 } else {
5856d44e 9783 int i, loaded_base = 0;
39d5492a 9784 TCGv_i32 loaded_var;
9ee6e8bb 9785 /* Load/store multiple. */
b0109805 9786 addr = load_reg(s, rn);
9ee6e8bb
PB
9787 offset = 0;
9788 for (i = 0; i < 16; i++) {
9789 if (insn & (1 << i))
9790 offset += 4;
9791 }
9792 if (insn & (1 << 24)) {
b0109805 9793 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9794 }
9795
39d5492a 9796 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9797 for (i = 0; i < 16; i++) {
9798 if ((insn & (1 << i)) == 0)
9799 continue;
9800 if (insn & (1 << 20)) {
9801 /* Load. */
e2592fad 9802 tmp = tcg_temp_new_i32();
12dcc321 9803 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9804 if (i == 15) {
b0109805 9805 gen_bx(s, tmp);
5856d44e
YO
9806 } else if (i == rn) {
9807 loaded_var = tmp;
9808 loaded_base = 1;
9ee6e8bb 9809 } else {
b0109805 9810 store_reg(s, i, tmp);
9ee6e8bb
PB
9811 }
9812 } else {
9813 /* Store. */
b0109805 9814 tmp = load_reg(s, i);
12dcc321 9815 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9816 tcg_temp_free_i32(tmp);
9ee6e8bb 9817 }
b0109805 9818 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9819 }
5856d44e
YO
9820 if (loaded_base) {
9821 store_reg(s, rn, loaded_var);
9822 }
9ee6e8bb
PB
9823 if (insn & (1 << 21)) {
9824 /* Base register writeback. */
9825 if (insn & (1 << 24)) {
b0109805 9826 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9827 }
9828 /* Fault if writeback register is in register list. */
9829 if (insn & (1 << rn))
9830 goto illegal_op;
b0109805
PB
9831 store_reg(s, rn, addr);
9832 } else {
7d1b0095 9833 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9834 }
9835 }
9836 }
9837 break;
2af9ab77
JB
9838 case 5:
9839
9ee6e8bb 9840 op = (insn >> 21) & 0xf;
2af9ab77 9841 if (op == 6) {
62b44f05
AR
9842 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9843 goto illegal_op;
9844 }
2af9ab77
JB
9845 /* Halfword pack. */
9846 tmp = load_reg(s, rn);
9847 tmp2 = load_reg(s, rm);
9848 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9849 if (insn & (1 << 5)) {
9850 /* pkhtb */
9851 if (shift == 0)
9852 shift = 31;
9853 tcg_gen_sari_i32(tmp2, tmp2, shift);
9854 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9855 tcg_gen_ext16u_i32(tmp2, tmp2);
9856 } else {
9857 /* pkhbt */
9858 if (shift)
9859 tcg_gen_shli_i32(tmp2, tmp2, shift);
9860 tcg_gen_ext16u_i32(tmp, tmp);
9861 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9862 }
9863 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9864 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9865 store_reg(s, rd, tmp);
9866 } else {
2af9ab77
JB
9867 /* Data processing register constant shift. */
9868 if (rn == 15) {
7d1b0095 9869 tmp = tcg_temp_new_i32();
2af9ab77
JB
9870 tcg_gen_movi_i32(tmp, 0);
9871 } else {
9872 tmp = load_reg(s, rn);
9873 }
9874 tmp2 = load_reg(s, rm);
9875
9876 shiftop = (insn >> 4) & 3;
9877 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9878 conds = (insn & (1 << 20)) != 0;
9879 logic_cc = (conds && thumb2_logic_op(op));
9880 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9881 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9882 goto illegal_op;
7d1b0095 9883 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9884 if (rd != 15) {
9885 store_reg(s, rd, tmp);
9886 } else {
7d1b0095 9887 tcg_temp_free_i32(tmp);
2af9ab77 9888 }
3174f8e9 9889 }
9ee6e8bb
PB
9890 break;
9891 case 13: /* Misc data processing. */
9892 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9893 if (op < 4 && (insn & 0xf000) != 0xf000)
9894 goto illegal_op;
9895 switch (op) {
9896 case 0: /* Register controlled shift. */
8984bd2e
PB
9897 tmp = load_reg(s, rn);
9898 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9899 if ((insn & 0x70) != 0)
9900 goto illegal_op;
9901 op = (insn >> 21) & 3;
8984bd2e
PB
9902 logic_cc = (insn & (1 << 20)) != 0;
9903 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9904 if (logic_cc)
9905 gen_logic_CC(tmp);
7dcc1f89 9906 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9907 break;
9908 case 1: /* Sign/zero extend. */
62b44f05
AR
9909 op = (insn >> 20) & 7;
9910 switch (op) {
9911 case 0: /* SXTAH, SXTH */
9912 case 1: /* UXTAH, UXTH */
9913 case 4: /* SXTAB, SXTB */
9914 case 5: /* UXTAB, UXTB */
9915 break;
9916 case 2: /* SXTAB16, SXTB16 */
9917 case 3: /* UXTAB16, UXTB16 */
9918 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9919 goto illegal_op;
9920 }
9921 break;
9922 default:
9923 goto illegal_op;
9924 }
9925 if (rn != 15) {
9926 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9927 goto illegal_op;
9928 }
9929 }
5e3f878a 9930 tmp = load_reg(s, rm);
9ee6e8bb 9931 shift = (insn >> 4) & 3;
1301f322 9932 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9933 rotate, a shift is sufficient. */
9934 if (shift != 0)
f669df27 9935 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9936 op = (insn >> 20) & 7;
9937 switch (op) {
5e3f878a
PB
9938 case 0: gen_sxth(tmp); break;
9939 case 1: gen_uxth(tmp); break;
9940 case 2: gen_sxtb16(tmp); break;
9941 case 3: gen_uxtb16(tmp); break;
9942 case 4: gen_sxtb(tmp); break;
9943 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9944 default:
9945 g_assert_not_reached();
9ee6e8bb
PB
9946 }
9947 if (rn != 15) {
5e3f878a 9948 tmp2 = load_reg(s, rn);
9ee6e8bb 9949 if ((op >> 1) == 1) {
5e3f878a 9950 gen_add16(tmp, tmp2);
9ee6e8bb 9951 } else {
5e3f878a 9952 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9953 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9954 }
9955 }
5e3f878a 9956 store_reg(s, rd, tmp);
9ee6e8bb
PB
9957 break;
9958 case 2: /* SIMD add/subtract. */
62b44f05
AR
9959 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9960 goto illegal_op;
9961 }
9ee6e8bb
PB
9962 op = (insn >> 20) & 7;
9963 shift = (insn >> 4) & 7;
9964 if ((op & 3) == 3 || (shift & 3) == 3)
9965 goto illegal_op;
6ddbc6e4
PB
9966 tmp = load_reg(s, rn);
9967 tmp2 = load_reg(s, rm);
9968 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9969 tcg_temp_free_i32(tmp2);
6ddbc6e4 9970 store_reg(s, rd, tmp);
9ee6e8bb
PB
9971 break;
9972 case 3: /* Other data processing. */
9973 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9974 if (op < 4) {
9975 /* Saturating add/subtract. */
62b44f05
AR
9976 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9977 goto illegal_op;
9978 }
d9ba4830
PB
9979 tmp = load_reg(s, rn);
9980 tmp2 = load_reg(s, rm);
9ee6e8bb 9981 if (op & 1)
9ef39277 9982 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9983 if (op & 2)
9ef39277 9984 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9985 else
9ef39277 9986 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9987 tcg_temp_free_i32(tmp2);
9ee6e8bb 9988 } else {
62b44f05
AR
9989 switch (op) {
9990 case 0x0a: /* rbit */
9991 case 0x08: /* rev */
9992 case 0x09: /* rev16 */
9993 case 0x0b: /* revsh */
9994 case 0x18: /* clz */
9995 break;
9996 case 0x10: /* sel */
9997 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9998 goto illegal_op;
9999 }
10000 break;
10001 case 0x20: /* crc32/crc32c */
10002 case 0x21:
10003 case 0x22:
10004 case 0x28:
10005 case 0x29:
10006 case 0x2a:
10007 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10008 goto illegal_op;
10009 }
10010 break;
10011 default:
10012 goto illegal_op;
10013 }
d9ba4830 10014 tmp = load_reg(s, rn);
9ee6e8bb
PB
10015 switch (op) {
10016 case 0x0a: /* rbit */
d9ba4830 10017 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10018 break;
10019 case 0x08: /* rev */
66896cb8 10020 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10021 break;
10022 case 0x09: /* rev16 */
d9ba4830 10023 gen_rev16(tmp);
9ee6e8bb
PB
10024 break;
10025 case 0x0b: /* revsh */
d9ba4830 10026 gen_revsh(tmp);
9ee6e8bb
PB
10027 break;
10028 case 0x10: /* sel */
d9ba4830 10029 tmp2 = load_reg(s, rm);
7d1b0095 10030 tmp3 = tcg_temp_new_i32();
0ecb72a5 10031 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10032 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10033 tcg_temp_free_i32(tmp3);
10034 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10035 break;
10036 case 0x18: /* clz */
d9ba4830 10037 gen_helper_clz(tmp, tmp);
9ee6e8bb 10038 break;
eb0ecd5a
WN
10039 case 0x20:
10040 case 0x21:
10041 case 0x22:
10042 case 0x28:
10043 case 0x29:
10044 case 0x2a:
10045 {
10046 /* crc32/crc32c */
10047 uint32_t sz = op & 0x3;
10048 uint32_t c = op & 0x8;
10049
eb0ecd5a 10050 tmp2 = load_reg(s, rm);
aa633469
PM
10051 if (sz == 0) {
10052 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10053 } else if (sz == 1) {
10054 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10055 }
eb0ecd5a
WN
10056 tmp3 = tcg_const_i32(1 << sz);
10057 if (c) {
10058 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10059 } else {
10060 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10061 }
10062 tcg_temp_free_i32(tmp2);
10063 tcg_temp_free_i32(tmp3);
10064 break;
10065 }
9ee6e8bb 10066 default:
62b44f05 10067 g_assert_not_reached();
9ee6e8bb
PB
10068 }
10069 }
d9ba4830 10070 store_reg(s, rd, tmp);
9ee6e8bb
PB
10071 break;
10072 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10073 switch ((insn >> 20) & 7) {
10074 case 0: /* 32 x 32 -> 32 */
10075 case 7: /* Unsigned sum of absolute differences. */
10076 break;
10077 case 1: /* 16 x 16 -> 32 */
10078 case 2: /* Dual multiply add. */
10079 case 3: /* 32 * 16 -> 32msb */
10080 case 4: /* Dual multiply subtract. */
10081 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10082 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10083 goto illegal_op;
10084 }
10085 break;
10086 }
9ee6e8bb 10087 op = (insn >> 4) & 0xf;
d9ba4830
PB
10088 tmp = load_reg(s, rn);
10089 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10090 switch ((insn >> 20) & 7) {
10091 case 0: /* 32 x 32 -> 32 */
d9ba4830 10092 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10093 tcg_temp_free_i32(tmp2);
9ee6e8bb 10094 if (rs != 15) {
d9ba4830 10095 tmp2 = load_reg(s, rs);
9ee6e8bb 10096 if (op)
d9ba4830 10097 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10098 else
d9ba4830 10099 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10100 tcg_temp_free_i32(tmp2);
9ee6e8bb 10101 }
9ee6e8bb
PB
10102 break;
10103 case 1: /* 16 x 16 -> 32 */
d9ba4830 10104 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10105 tcg_temp_free_i32(tmp2);
9ee6e8bb 10106 if (rs != 15) {
d9ba4830 10107 tmp2 = load_reg(s, rs);
9ef39277 10108 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10109 tcg_temp_free_i32(tmp2);
9ee6e8bb 10110 }
9ee6e8bb
PB
10111 break;
10112 case 2: /* Dual multiply add. */
10113 case 4: /* Dual multiply subtract. */
10114 if (op)
d9ba4830
PB
10115 gen_swap_half(tmp2);
10116 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10117 if (insn & (1 << 22)) {
e1d177b9 10118 /* This subtraction cannot overflow. */
d9ba4830 10119 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10120 } else {
e1d177b9
PM
10121 /* This addition cannot overflow 32 bits;
10122 * however it may overflow considered as a signed
10123 * operation, in which case we must set the Q flag.
10124 */
9ef39277 10125 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10126 }
7d1b0095 10127 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10128 if (rs != 15)
10129 {
d9ba4830 10130 tmp2 = load_reg(s, rs);
9ef39277 10131 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10132 tcg_temp_free_i32(tmp2);
9ee6e8bb 10133 }
9ee6e8bb
PB
10134 break;
10135 case 3: /* 32 * 16 -> 32msb */
10136 if (op)
d9ba4830 10137 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10138 else
d9ba4830 10139 gen_sxth(tmp2);
a7812ae4
PB
10140 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10141 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10142 tmp = tcg_temp_new_i32();
ecc7b3aa 10143 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10144 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10145 if (rs != 15)
10146 {
d9ba4830 10147 tmp2 = load_reg(s, rs);
9ef39277 10148 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10149 tcg_temp_free_i32(tmp2);
9ee6e8bb 10150 }
9ee6e8bb 10151 break;
838fa72d
AJ
10152 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10153 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10154 if (rs != 15) {
838fa72d
AJ
10155 tmp = load_reg(s, rs);
10156 if (insn & (1 << 20)) {
10157 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10158 } else {
838fa72d 10159 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10160 }
2c0262af 10161 }
838fa72d
AJ
10162 if (insn & (1 << 4)) {
10163 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10164 }
10165 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10166 tmp = tcg_temp_new_i32();
ecc7b3aa 10167 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10168 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10169 break;
10170 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10171 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10172 tcg_temp_free_i32(tmp2);
9ee6e8bb 10173 if (rs != 15) {
d9ba4830
PB
10174 tmp2 = load_reg(s, rs);
10175 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10176 tcg_temp_free_i32(tmp2);
5fd46862 10177 }
9ee6e8bb 10178 break;
2c0262af 10179 }
d9ba4830 10180 store_reg(s, rd, tmp);
2c0262af 10181 break;
9ee6e8bb
PB
10182 case 6: case 7: /* 64-bit multiply, Divide. */
10183 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10184 tmp = load_reg(s, rn);
10185 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10186 if ((op & 0x50) == 0x10) {
10187 /* sdiv, udiv */
d614a513 10188 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10189 goto illegal_op;
47789990 10190 }
9ee6e8bb 10191 if (op & 0x20)
5e3f878a 10192 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10193 else
5e3f878a 10194 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10195 tcg_temp_free_i32(tmp2);
5e3f878a 10196 store_reg(s, rd, tmp);
9ee6e8bb
PB
10197 } else if ((op & 0xe) == 0xc) {
10198 /* Dual multiply accumulate long. */
62b44f05
AR
10199 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10200 tcg_temp_free_i32(tmp);
10201 tcg_temp_free_i32(tmp2);
10202 goto illegal_op;
10203 }
9ee6e8bb 10204 if (op & 1)
5e3f878a
PB
10205 gen_swap_half(tmp2);
10206 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10207 if (op & 0x10) {
5e3f878a 10208 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10209 } else {
5e3f878a 10210 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10211 }
7d1b0095 10212 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10213 /* BUGFIX */
10214 tmp64 = tcg_temp_new_i64();
10215 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10216 tcg_temp_free_i32(tmp);
a7812ae4
PB
10217 gen_addq(s, tmp64, rs, rd);
10218 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10219 tcg_temp_free_i64(tmp64);
2c0262af 10220 } else {
9ee6e8bb
PB
10221 if (op & 0x20) {
10222 /* Unsigned 64-bit multiply */
a7812ae4 10223 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10224 } else {
9ee6e8bb
PB
10225 if (op & 8) {
10226 /* smlalxy */
62b44f05
AR
10227 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10228 tcg_temp_free_i32(tmp2);
10229 tcg_temp_free_i32(tmp);
10230 goto illegal_op;
10231 }
5e3f878a 10232 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10233 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10234 tmp64 = tcg_temp_new_i64();
10235 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10236 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10237 } else {
10238 /* Signed 64-bit multiply */
a7812ae4 10239 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10240 }
b5ff1b31 10241 }
9ee6e8bb
PB
10242 if (op & 4) {
10243 /* umaal */
62b44f05
AR
10244 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10245 tcg_temp_free_i64(tmp64);
10246 goto illegal_op;
10247 }
a7812ae4
PB
10248 gen_addq_lo(s, tmp64, rs);
10249 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10250 } else if (op & 0x40) {
10251 /* 64-bit accumulate. */
a7812ae4 10252 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10253 }
a7812ae4 10254 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10255 tcg_temp_free_i64(tmp64);
5fd46862 10256 }
2c0262af 10257 break;
9ee6e8bb
PB
10258 }
10259 break;
10260 case 6: case 7: case 14: case 15:
10261 /* Coprocessor. */
10262 if (((insn >> 24) & 3) == 3) {
10263 /* Translate into the equivalent ARM encoding. */
f06053e3 10264 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10265 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10266 goto illegal_op;
7dcc1f89 10267 }
6a57f3eb 10268 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10269 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10270 goto illegal_op;
10271 }
9ee6e8bb
PB
10272 } else {
10273 if (insn & (1 << 28))
10274 goto illegal_op;
7dcc1f89 10275 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10276 goto illegal_op;
7dcc1f89 10277 }
9ee6e8bb
PB
10278 }
10279 break;
10280 case 8: case 9: case 10: case 11:
10281 if (insn & (1 << 15)) {
10282 /* Branches, misc control. */
10283 if (insn & 0x5000) {
10284 /* Unconditional branch. */
10285 /* signextend(hw1[10:0]) -> offset[:12]. */
10286 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10287 /* hw1[10:0] -> offset[11:1]. */
10288 offset |= (insn & 0x7ff) << 1;
10289 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10290 offset[24:22] already have the same value because of the
10291 sign extension above. */
10292 offset ^= ((~insn) & (1 << 13)) << 10;
10293 offset ^= ((~insn) & (1 << 11)) << 11;
10294
9ee6e8bb
PB
10295 if (insn & (1 << 14)) {
10296 /* Branch and link. */
3174f8e9 10297 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10298 }
3b46e624 10299
b0109805 10300 offset += s->pc;
9ee6e8bb
PB
10301 if (insn & (1 << 12)) {
10302 /* b/bl */
b0109805 10303 gen_jmp(s, offset);
9ee6e8bb
PB
10304 } else {
10305 /* blx */
b0109805 10306 offset &= ~(uint32_t)2;
be5e7a76 10307 /* thumb2 bx, no need to check */
b0109805 10308 gen_bx_im(s, offset);
2c0262af 10309 }
9ee6e8bb
PB
10310 } else if (((insn >> 23) & 7) == 7) {
10311 /* Misc control */
10312 if (insn & (1 << 13))
10313 goto illegal_op;
10314
10315 if (insn & (1 << 26)) {
37e6456e
PM
10316 if (!(insn & (1 << 20))) {
10317 /* Hypervisor call (v7) */
10318 int imm16 = extract32(insn, 16, 4) << 12
10319 | extract32(insn, 0, 12);
10320 ARCH(7);
10321 if (IS_USER(s)) {
10322 goto illegal_op;
10323 }
10324 gen_hvc(s, imm16);
10325 } else {
10326 /* Secure monitor call (v6+) */
10327 ARCH(6K);
10328 if (IS_USER(s)) {
10329 goto illegal_op;
10330 }
10331 gen_smc(s);
10332 }
2c0262af 10333 } else {
9ee6e8bb
PB
10334 op = (insn >> 20) & 7;
10335 switch (op) {
10336 case 0: /* msr cpsr. */
b53d8923 10337 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10338 tmp = load_reg(s, rn);
10339 addr = tcg_const_i32(insn & 0xff);
10340 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10341 tcg_temp_free_i32(addr);
7d1b0095 10342 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10343 gen_lookup_tb(s);
10344 break;
10345 }
10346 /* fall through */
10347 case 1: /* msr spsr. */
b53d8923 10348 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10349 goto illegal_op;
b53d8923 10350 }
8bfd0550
PM
10351
10352 if (extract32(insn, 5, 1)) {
10353 /* MSR (banked) */
10354 int sysm = extract32(insn, 8, 4) |
10355 (extract32(insn, 4, 1) << 4);
10356 int r = op & 1;
10357
10358 gen_msr_banked(s, r, sysm, rm);
10359 break;
10360 }
10361
10362 /* MSR (for PSRs) */
2fbac54b
FN
10363 tmp = load_reg(s, rn);
10364 if (gen_set_psr(s,
7dcc1f89 10365 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10366 op == 1, tmp))
9ee6e8bb
PB
10367 goto illegal_op;
10368 break;
10369 case 2: /* cps, nop-hint. */
10370 if (((insn >> 8) & 7) == 0) {
10371 gen_nop_hint(s, insn & 0xff);
10372 }
10373 /* Implemented as NOP in user mode. */
10374 if (IS_USER(s))
10375 break;
10376 offset = 0;
10377 imm = 0;
10378 if (insn & (1 << 10)) {
10379 if (insn & (1 << 7))
10380 offset |= CPSR_A;
10381 if (insn & (1 << 6))
10382 offset |= CPSR_I;
10383 if (insn & (1 << 5))
10384 offset |= CPSR_F;
10385 if (insn & (1 << 9))
10386 imm = CPSR_A | CPSR_I | CPSR_F;
10387 }
10388 if (insn & (1 << 8)) {
10389 offset |= 0x1f;
10390 imm |= (insn & 0x1f);
10391 }
10392 if (offset) {
2fbac54b 10393 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10394 }
10395 break;
10396 case 3: /* Special control operations. */
426f5abc 10397 ARCH(7);
9ee6e8bb
PB
10398 op = (insn >> 4) & 0xf;
10399 switch (op) {
10400 case 2: /* clrex */
426f5abc 10401 gen_clrex(s);
9ee6e8bb
PB
10402 break;
10403 case 4: /* dsb */
10404 case 5: /* dmb */
9ee6e8bb 10405 /* These execute as NOPs. */
9ee6e8bb 10406 break;
6df99dec
SS
10407 case 6: /* isb */
10408 /* We need to break the TB after this insn
10409 * to execute self-modifying code correctly
10410 * and also to take any pending interrupts
10411 * immediately.
10412 */
10413 gen_lookup_tb(s);
10414 break;
9ee6e8bb
PB
10415 default:
10416 goto illegal_op;
10417 }
10418 break;
10419 case 4: /* bxj */
10420 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10421 tmp = load_reg(s, rn);
10422 gen_bx(s, tmp);
9ee6e8bb
PB
10423 break;
10424 case 5: /* Exception return. */
b8b45b68
RV
10425 if (IS_USER(s)) {
10426 goto illegal_op;
10427 }
10428 if (rn != 14 || rd != 15) {
10429 goto illegal_op;
10430 }
10431 tmp = load_reg(s, rn);
10432 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10433 gen_exception_return(s, tmp);
10434 break;
8bfd0550
PM
10435 case 6: /* MRS */
10436 if (extract32(insn, 5, 1)) {
10437 /* MRS (banked) */
10438 int sysm = extract32(insn, 16, 4) |
10439 (extract32(insn, 4, 1) << 4);
10440
10441 gen_mrs_banked(s, 0, sysm, rd);
10442 break;
10443 }
10444
10445 /* mrs cpsr */
7d1b0095 10446 tmp = tcg_temp_new_i32();
b53d8923 10447 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10448 addr = tcg_const_i32(insn & 0xff);
10449 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10450 tcg_temp_free_i32(addr);
9ee6e8bb 10451 } else {
9ef39277 10452 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10453 }
8984bd2e 10454 store_reg(s, rd, tmp);
9ee6e8bb 10455 break;
8bfd0550
PM
10456 case 7: /* MRS */
10457 if (extract32(insn, 5, 1)) {
10458 /* MRS (banked) */
10459 int sysm = extract32(insn, 16, 4) |
10460 (extract32(insn, 4, 1) << 4);
10461
10462 gen_mrs_banked(s, 1, sysm, rd);
10463 break;
10464 }
10465
10466 /* mrs spsr. */
9ee6e8bb 10467 /* Not accessible in user mode. */
b53d8923 10468 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10469 goto illegal_op;
b53d8923 10470 }
d9ba4830
PB
10471 tmp = load_cpu_field(spsr);
10472 store_reg(s, rd, tmp);
9ee6e8bb 10473 break;
2c0262af
FB
10474 }
10475 }
9ee6e8bb
PB
10476 } else {
10477 /* Conditional branch. */
10478 op = (insn >> 22) & 0xf;
10479 /* Generate a conditional jump to next instruction. */
10480 s->condlabel = gen_new_label();
39fb730a 10481 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10482 s->condjmp = 1;
10483
10484 /* offset[11:1] = insn[10:0] */
10485 offset = (insn & 0x7ff) << 1;
10486 /* offset[17:12] = insn[21:16]. */
10487 offset |= (insn & 0x003f0000) >> 4;
10488 /* offset[31:20] = insn[26]. */
10489 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10490 /* offset[18] = insn[13]. */
10491 offset |= (insn & (1 << 13)) << 5;
10492 /* offset[19] = insn[11]. */
10493 offset |= (insn & (1 << 11)) << 8;
10494
10495 /* jump to the offset */
b0109805 10496 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10497 }
10498 } else {
10499 /* Data processing immediate. */
10500 if (insn & (1 << 25)) {
10501 if (insn & (1 << 24)) {
10502 if (insn & (1 << 20))
10503 goto illegal_op;
10504 /* Bitfield/Saturate. */
10505 op = (insn >> 21) & 7;
10506 imm = insn & 0x1f;
10507 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10508 if (rn == 15) {
7d1b0095 10509 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10510 tcg_gen_movi_i32(tmp, 0);
10511 } else {
10512 tmp = load_reg(s, rn);
10513 }
9ee6e8bb
PB
10514 switch (op) {
10515 case 2: /* Signed bitfield extract. */
10516 imm++;
10517 if (shift + imm > 32)
10518 goto illegal_op;
10519 if (imm < 32)
6ddbc6e4 10520 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10521 break;
10522 case 6: /* Unsigned bitfield extract. */
10523 imm++;
10524 if (shift + imm > 32)
10525 goto illegal_op;
10526 if (imm < 32)
6ddbc6e4 10527 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10528 break;
10529 case 3: /* Bitfield insert/clear. */
10530 if (imm < shift)
10531 goto illegal_op;
10532 imm = imm + 1 - shift;
10533 if (imm != 32) {
6ddbc6e4 10534 tmp2 = load_reg(s, rd);
d593c48e 10535 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10536 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10537 }
10538 break;
10539 case 7:
10540 goto illegal_op;
10541 default: /* Saturate. */
9ee6e8bb
PB
10542 if (shift) {
10543 if (op & 1)
6ddbc6e4 10544 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10545 else
6ddbc6e4 10546 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10547 }
6ddbc6e4 10548 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10549 if (op & 4) {
10550 /* Unsigned. */
62b44f05
AR
10551 if ((op & 1) && shift == 0) {
10552 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10553 tcg_temp_free_i32(tmp);
10554 tcg_temp_free_i32(tmp2);
10555 goto illegal_op;
10556 }
9ef39277 10557 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10558 } else {
9ef39277 10559 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10560 }
2c0262af 10561 } else {
9ee6e8bb 10562 /* Signed. */
62b44f05
AR
10563 if ((op & 1) && shift == 0) {
10564 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10565 tcg_temp_free_i32(tmp);
10566 tcg_temp_free_i32(tmp2);
10567 goto illegal_op;
10568 }
9ef39277 10569 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10570 } else {
9ef39277 10571 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10572 }
2c0262af 10573 }
b75263d6 10574 tcg_temp_free_i32(tmp2);
9ee6e8bb 10575 break;
2c0262af 10576 }
6ddbc6e4 10577 store_reg(s, rd, tmp);
9ee6e8bb
PB
10578 } else {
10579 imm = ((insn & 0x04000000) >> 15)
10580 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10581 if (insn & (1 << 22)) {
10582 /* 16-bit immediate. */
10583 imm |= (insn >> 4) & 0xf000;
10584 if (insn & (1 << 23)) {
10585 /* movt */
5e3f878a 10586 tmp = load_reg(s, rd);
86831435 10587 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10588 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10589 } else {
9ee6e8bb 10590 /* movw */
7d1b0095 10591 tmp = tcg_temp_new_i32();
5e3f878a 10592 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10593 }
10594 } else {
9ee6e8bb
PB
10595 /* Add/sub 12-bit immediate. */
10596 if (rn == 15) {
b0109805 10597 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10598 if (insn & (1 << 23))
b0109805 10599 offset -= imm;
9ee6e8bb 10600 else
b0109805 10601 offset += imm;
7d1b0095 10602 tmp = tcg_temp_new_i32();
5e3f878a 10603 tcg_gen_movi_i32(tmp, offset);
2c0262af 10604 } else {
5e3f878a 10605 tmp = load_reg(s, rn);
9ee6e8bb 10606 if (insn & (1 << 23))
5e3f878a 10607 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10608 else
5e3f878a 10609 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10610 }
9ee6e8bb 10611 }
5e3f878a 10612 store_reg(s, rd, tmp);
191abaa2 10613 }
9ee6e8bb
PB
10614 } else {
10615 int shifter_out = 0;
10616 /* modified 12-bit immediate. */
10617 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10618 imm = (insn & 0xff);
10619 switch (shift) {
10620 case 0: /* XY */
10621 /* Nothing to do. */
10622 break;
10623 case 1: /* 00XY00XY */
10624 imm |= imm << 16;
10625 break;
10626 case 2: /* XY00XY00 */
10627 imm |= imm << 16;
10628 imm <<= 8;
10629 break;
10630 case 3: /* XYXYXYXY */
10631 imm |= imm << 16;
10632 imm |= imm << 8;
10633 break;
10634 default: /* Rotated constant. */
10635 shift = (shift << 1) | (imm >> 7);
10636 imm |= 0x80;
10637 imm = imm << (32 - shift);
10638 shifter_out = 1;
10639 break;
b5ff1b31 10640 }
7d1b0095 10641 tmp2 = tcg_temp_new_i32();
3174f8e9 10642 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10643 rn = (insn >> 16) & 0xf;
3174f8e9 10644 if (rn == 15) {
7d1b0095 10645 tmp = tcg_temp_new_i32();
3174f8e9
FN
10646 tcg_gen_movi_i32(tmp, 0);
10647 } else {
10648 tmp = load_reg(s, rn);
10649 }
9ee6e8bb
PB
10650 op = (insn >> 21) & 0xf;
10651 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10652 shifter_out, tmp, tmp2))
9ee6e8bb 10653 goto illegal_op;
7d1b0095 10654 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10655 rd = (insn >> 8) & 0xf;
10656 if (rd != 15) {
3174f8e9
FN
10657 store_reg(s, rd, tmp);
10658 } else {
7d1b0095 10659 tcg_temp_free_i32(tmp);
2c0262af 10660 }
2c0262af 10661 }
9ee6e8bb
PB
10662 }
10663 break;
10664 case 12: /* Load/store single data item. */
10665 {
10666 int postinc = 0;
10667 int writeback = 0;
a99caa48 10668 int memidx;
9ee6e8bb 10669 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10670 if (disas_neon_ls_insn(s, insn)) {
c1713132 10671 goto illegal_op;
7dcc1f89 10672 }
9ee6e8bb
PB
10673 break;
10674 }
a2fdc890
PM
10675 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10676 if (rs == 15) {
10677 if (!(insn & (1 << 20))) {
10678 goto illegal_op;
10679 }
10680 if (op != 2) {
10681 /* Byte or halfword load space with dest == r15 : memory hints.
10682 * Catch them early so we don't emit pointless addressing code.
10683 * This space is a mix of:
10684 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10685 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10686 * cores)
10687 * unallocated hints, which must be treated as NOPs
10688 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10689 * which is easiest for the decoding logic
10690 * Some space which must UNDEF
10691 */
10692 int op1 = (insn >> 23) & 3;
10693 int op2 = (insn >> 6) & 0x3f;
10694 if (op & 2) {
10695 goto illegal_op;
10696 }
10697 if (rn == 15) {
02afbf64
PM
10698 /* UNPREDICTABLE, unallocated hint or
10699 * PLD/PLDW/PLI (literal)
10700 */
a2fdc890
PM
10701 return 0;
10702 }
10703 if (op1 & 1) {
02afbf64 10704 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10705 }
10706 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10707 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10708 }
10709 /* UNDEF space, or an UNPREDICTABLE */
10710 return 1;
10711 }
10712 }
a99caa48 10713 memidx = get_mem_index(s);
9ee6e8bb 10714 if (rn == 15) {
7d1b0095 10715 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10716 /* PC relative. */
10717 /* s->pc has already been incremented by 4. */
10718 imm = s->pc & 0xfffffffc;
10719 if (insn & (1 << 23))
10720 imm += insn & 0xfff;
10721 else
10722 imm -= insn & 0xfff;
b0109805 10723 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10724 } else {
b0109805 10725 addr = load_reg(s, rn);
9ee6e8bb
PB
10726 if (insn & (1 << 23)) {
10727 /* Positive offset. */
10728 imm = insn & 0xfff;
b0109805 10729 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10730 } else {
9ee6e8bb 10731 imm = insn & 0xff;
2a0308c5
PM
10732 switch ((insn >> 8) & 0xf) {
10733 case 0x0: /* Shifted Register. */
9ee6e8bb 10734 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10735 if (shift > 3) {
10736 tcg_temp_free_i32(addr);
18c9b560 10737 goto illegal_op;
2a0308c5 10738 }
b26eefb6 10739 tmp = load_reg(s, rm);
9ee6e8bb 10740 if (shift)
b26eefb6 10741 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10742 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10743 tcg_temp_free_i32(tmp);
9ee6e8bb 10744 break;
2a0308c5 10745 case 0xc: /* Negative offset. */
b0109805 10746 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10747 break;
2a0308c5 10748 case 0xe: /* User privilege. */
b0109805 10749 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10750 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10751 break;
2a0308c5 10752 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10753 imm = -imm;
10754 /* Fall through. */
2a0308c5 10755 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10756 postinc = 1;
10757 writeback = 1;
10758 break;
2a0308c5 10759 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10760 imm = -imm;
10761 /* Fall through. */
2a0308c5 10762 case 0xf: /* Pre-increment. */
b0109805 10763 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10764 writeback = 1;
10765 break;
10766 default:
2a0308c5 10767 tcg_temp_free_i32(addr);
b7bcbe95 10768 goto illegal_op;
9ee6e8bb
PB
10769 }
10770 }
10771 }
9ee6e8bb
PB
10772 if (insn & (1 << 20)) {
10773 /* Load. */
5a839c0d 10774 tmp = tcg_temp_new_i32();
a2fdc890 10775 switch (op) {
5a839c0d 10776 case 0:
12dcc321 10777 gen_aa32_ld8u(s, tmp, addr, memidx);
5a839c0d
PM
10778 break;
10779 case 4:
12dcc321 10780 gen_aa32_ld8s(s, tmp, addr, memidx);
5a839c0d
PM
10781 break;
10782 case 1:
12dcc321 10783 gen_aa32_ld16u(s, tmp, addr, memidx);
5a839c0d
PM
10784 break;
10785 case 5:
12dcc321 10786 gen_aa32_ld16s(s, tmp, addr, memidx);
5a839c0d
PM
10787 break;
10788 case 2:
12dcc321 10789 gen_aa32_ld32u(s, tmp, addr, memidx);
5a839c0d 10790 break;
2a0308c5 10791 default:
5a839c0d 10792 tcg_temp_free_i32(tmp);
2a0308c5
PM
10793 tcg_temp_free_i32(addr);
10794 goto illegal_op;
a2fdc890
PM
10795 }
10796 if (rs == 15) {
10797 gen_bx(s, tmp);
9ee6e8bb 10798 } else {
a2fdc890 10799 store_reg(s, rs, tmp);
9ee6e8bb
PB
10800 }
10801 } else {
10802 /* Store. */
b0109805 10803 tmp = load_reg(s, rs);
9ee6e8bb 10804 switch (op) {
5a839c0d 10805 case 0:
12dcc321 10806 gen_aa32_st8(s, tmp, addr, memidx);
5a839c0d
PM
10807 break;
10808 case 1:
12dcc321 10809 gen_aa32_st16(s, tmp, addr, memidx);
5a839c0d
PM
10810 break;
10811 case 2:
12dcc321 10812 gen_aa32_st32(s, tmp, addr, memidx);
5a839c0d 10813 break;
2a0308c5 10814 default:
5a839c0d 10815 tcg_temp_free_i32(tmp);
2a0308c5
PM
10816 tcg_temp_free_i32(addr);
10817 goto illegal_op;
b7bcbe95 10818 }
5a839c0d 10819 tcg_temp_free_i32(tmp);
2c0262af 10820 }
9ee6e8bb 10821 if (postinc)
b0109805
PB
10822 tcg_gen_addi_i32(addr, addr, imm);
10823 if (writeback) {
10824 store_reg(s, rn, addr);
10825 } else {
7d1b0095 10826 tcg_temp_free_i32(addr);
b0109805 10827 }
9ee6e8bb
PB
10828 }
10829 break;
10830 default:
10831 goto illegal_op;
2c0262af 10832 }
9ee6e8bb
PB
10833 return 0;
10834illegal_op:
10835 return 1;
2c0262af
FB
10836}
10837
0ecb72a5 10838static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10839{
10840 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10841 int32_t offset;
10842 int i;
39d5492a
PM
10843 TCGv_i32 tmp;
10844 TCGv_i32 tmp2;
10845 TCGv_i32 addr;
99c475ab 10846
9ee6e8bb
PB
10847 if (s->condexec_mask) {
10848 cond = s->condexec_cond;
bedd2912
JB
10849 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10850 s->condlabel = gen_new_label();
39fb730a 10851 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10852 s->condjmp = 1;
10853 }
9ee6e8bb
PB
10854 }
10855
f9fd40eb 10856 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 10857 s->pc += 2;
b5ff1b31 10858
99c475ab
FB
10859 switch (insn >> 12) {
10860 case 0: case 1:
396e467c 10861
99c475ab
FB
10862 rd = insn & 7;
10863 op = (insn >> 11) & 3;
10864 if (op == 3) {
10865 /* add/subtract */
10866 rn = (insn >> 3) & 7;
396e467c 10867 tmp = load_reg(s, rn);
99c475ab
FB
10868 if (insn & (1 << 10)) {
10869 /* immediate */
7d1b0095 10870 tmp2 = tcg_temp_new_i32();
396e467c 10871 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10872 } else {
10873 /* reg */
10874 rm = (insn >> 6) & 7;
396e467c 10875 tmp2 = load_reg(s, rm);
99c475ab 10876 }
9ee6e8bb
PB
10877 if (insn & (1 << 9)) {
10878 if (s->condexec_mask)
396e467c 10879 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10880 else
72485ec4 10881 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10882 } else {
10883 if (s->condexec_mask)
396e467c 10884 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10885 else
72485ec4 10886 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10887 }
7d1b0095 10888 tcg_temp_free_i32(tmp2);
396e467c 10889 store_reg(s, rd, tmp);
99c475ab
FB
10890 } else {
10891 /* shift immediate */
10892 rm = (insn >> 3) & 7;
10893 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10894 tmp = load_reg(s, rm);
10895 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10896 if (!s->condexec_mask)
10897 gen_logic_CC(tmp);
10898 store_reg(s, rd, tmp);
99c475ab
FB
10899 }
10900 break;
10901 case 2: case 3:
10902 /* arithmetic large immediate */
10903 op = (insn >> 11) & 3;
10904 rd = (insn >> 8) & 0x7;
396e467c 10905 if (op == 0) { /* mov */
7d1b0095 10906 tmp = tcg_temp_new_i32();
396e467c 10907 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10908 if (!s->condexec_mask)
396e467c
FN
10909 gen_logic_CC(tmp);
10910 store_reg(s, rd, tmp);
10911 } else {
10912 tmp = load_reg(s, rd);
7d1b0095 10913 tmp2 = tcg_temp_new_i32();
396e467c
FN
10914 tcg_gen_movi_i32(tmp2, insn & 0xff);
10915 switch (op) {
10916 case 1: /* cmp */
72485ec4 10917 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10918 tcg_temp_free_i32(tmp);
10919 tcg_temp_free_i32(tmp2);
396e467c
FN
10920 break;
10921 case 2: /* add */
10922 if (s->condexec_mask)
10923 tcg_gen_add_i32(tmp, tmp, tmp2);
10924 else
72485ec4 10925 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10926 tcg_temp_free_i32(tmp2);
396e467c
FN
10927 store_reg(s, rd, tmp);
10928 break;
10929 case 3: /* sub */
10930 if (s->condexec_mask)
10931 tcg_gen_sub_i32(tmp, tmp, tmp2);
10932 else
72485ec4 10933 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10934 tcg_temp_free_i32(tmp2);
396e467c
FN
10935 store_reg(s, rd, tmp);
10936 break;
10937 }
99c475ab 10938 }
99c475ab
FB
10939 break;
10940 case 4:
10941 if (insn & (1 << 11)) {
10942 rd = (insn >> 8) & 7;
5899f386
FB
10943 /* load pc-relative. Bit 1 of PC is ignored. */
10944 val = s->pc + 2 + ((insn & 0xff) * 4);
10945 val &= ~(uint32_t)2;
7d1b0095 10946 addr = tcg_temp_new_i32();
b0109805 10947 tcg_gen_movi_i32(addr, val);
c40c8556 10948 tmp = tcg_temp_new_i32();
12dcc321 10949 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7d1b0095 10950 tcg_temp_free_i32(addr);
b0109805 10951 store_reg(s, rd, tmp);
99c475ab
FB
10952 break;
10953 }
10954 if (insn & (1 << 10)) {
10955 /* data processing extended or blx */
10956 rd = (insn & 7) | ((insn >> 4) & 8);
10957 rm = (insn >> 3) & 0xf;
10958 op = (insn >> 8) & 3;
10959 switch (op) {
10960 case 0: /* add */
396e467c
FN
10961 tmp = load_reg(s, rd);
10962 tmp2 = load_reg(s, rm);
10963 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10964 tcg_temp_free_i32(tmp2);
396e467c 10965 store_reg(s, rd, tmp);
99c475ab
FB
10966 break;
10967 case 1: /* cmp */
396e467c
FN
10968 tmp = load_reg(s, rd);
10969 tmp2 = load_reg(s, rm);
72485ec4 10970 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10971 tcg_temp_free_i32(tmp2);
10972 tcg_temp_free_i32(tmp);
99c475ab
FB
10973 break;
10974 case 2: /* mov/cpy */
396e467c
FN
10975 tmp = load_reg(s, rm);
10976 store_reg(s, rd, tmp);
99c475ab
FB
10977 break;
10978 case 3:/* branch [and link] exchange thumb register */
b0109805 10979 tmp = load_reg(s, rm);
99c475ab 10980 if (insn & (1 << 7)) {
be5e7a76 10981 ARCH(5);
99c475ab 10982 val = (uint32_t)s->pc | 1;
7d1b0095 10983 tmp2 = tcg_temp_new_i32();
b0109805
PB
10984 tcg_gen_movi_i32(tmp2, val);
10985 store_reg(s, 14, tmp2);
99c475ab 10986 }
be5e7a76 10987 /* already thumb, no need to check */
d9ba4830 10988 gen_bx(s, tmp);
99c475ab
FB
10989 break;
10990 }
10991 break;
10992 }
10993
10994 /* data processing register */
10995 rd = insn & 7;
10996 rm = (insn >> 3) & 7;
10997 op = (insn >> 6) & 0xf;
10998 if (op == 2 || op == 3 || op == 4 || op == 7) {
10999 /* the shift/rotate ops want the operands backwards */
11000 val = rm;
11001 rm = rd;
11002 rd = val;
11003 val = 1;
11004 } else {
11005 val = 0;
11006 }
11007
396e467c 11008 if (op == 9) { /* neg */
7d1b0095 11009 tmp = tcg_temp_new_i32();
396e467c
FN
11010 tcg_gen_movi_i32(tmp, 0);
11011 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11012 tmp = load_reg(s, rd);
11013 } else {
39d5492a 11014 TCGV_UNUSED_I32(tmp);
396e467c 11015 }
99c475ab 11016
396e467c 11017 tmp2 = load_reg(s, rm);
5899f386 11018 switch (op) {
99c475ab 11019 case 0x0: /* and */
396e467c 11020 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11021 if (!s->condexec_mask)
396e467c 11022 gen_logic_CC(tmp);
99c475ab
FB
11023 break;
11024 case 0x1: /* eor */
396e467c 11025 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11026 if (!s->condexec_mask)
396e467c 11027 gen_logic_CC(tmp);
99c475ab
FB
11028 break;
11029 case 0x2: /* lsl */
9ee6e8bb 11030 if (s->condexec_mask) {
365af80e 11031 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11032 } else {
9ef39277 11033 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11034 gen_logic_CC(tmp2);
9ee6e8bb 11035 }
99c475ab
FB
11036 break;
11037 case 0x3: /* lsr */
9ee6e8bb 11038 if (s->condexec_mask) {
365af80e 11039 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11040 } else {
9ef39277 11041 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11042 gen_logic_CC(tmp2);
9ee6e8bb 11043 }
99c475ab
FB
11044 break;
11045 case 0x4: /* asr */
9ee6e8bb 11046 if (s->condexec_mask) {
365af80e 11047 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11048 } else {
9ef39277 11049 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11050 gen_logic_CC(tmp2);
9ee6e8bb 11051 }
99c475ab
FB
11052 break;
11053 case 0x5: /* adc */
49b4c31e 11054 if (s->condexec_mask) {
396e467c 11055 gen_adc(tmp, tmp2);
49b4c31e
RH
11056 } else {
11057 gen_adc_CC(tmp, tmp, tmp2);
11058 }
99c475ab
FB
11059 break;
11060 case 0x6: /* sbc */
2de68a49 11061 if (s->condexec_mask) {
396e467c 11062 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11063 } else {
11064 gen_sbc_CC(tmp, tmp, tmp2);
11065 }
99c475ab
FB
11066 break;
11067 case 0x7: /* ror */
9ee6e8bb 11068 if (s->condexec_mask) {
f669df27
AJ
11069 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11070 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11071 } else {
9ef39277 11072 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11073 gen_logic_CC(tmp2);
9ee6e8bb 11074 }
99c475ab
FB
11075 break;
11076 case 0x8: /* tst */
396e467c
FN
11077 tcg_gen_and_i32(tmp, tmp, tmp2);
11078 gen_logic_CC(tmp);
99c475ab 11079 rd = 16;
5899f386 11080 break;
99c475ab 11081 case 0x9: /* neg */
9ee6e8bb 11082 if (s->condexec_mask)
396e467c 11083 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11084 else
72485ec4 11085 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11086 break;
11087 case 0xa: /* cmp */
72485ec4 11088 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11089 rd = 16;
11090 break;
11091 case 0xb: /* cmn */
72485ec4 11092 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11093 rd = 16;
11094 break;
11095 case 0xc: /* orr */
396e467c 11096 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11097 if (!s->condexec_mask)
396e467c 11098 gen_logic_CC(tmp);
99c475ab
FB
11099 break;
11100 case 0xd: /* mul */
7b2919a0 11101 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11102 if (!s->condexec_mask)
396e467c 11103 gen_logic_CC(tmp);
99c475ab
FB
11104 break;
11105 case 0xe: /* bic */
f669df27 11106 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11107 if (!s->condexec_mask)
396e467c 11108 gen_logic_CC(tmp);
99c475ab
FB
11109 break;
11110 case 0xf: /* mvn */
396e467c 11111 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11112 if (!s->condexec_mask)
396e467c 11113 gen_logic_CC(tmp2);
99c475ab 11114 val = 1;
5899f386 11115 rm = rd;
99c475ab
FB
11116 break;
11117 }
11118 if (rd != 16) {
396e467c
FN
11119 if (val) {
11120 store_reg(s, rm, tmp2);
11121 if (op != 0xf)
7d1b0095 11122 tcg_temp_free_i32(tmp);
396e467c
FN
11123 } else {
11124 store_reg(s, rd, tmp);
7d1b0095 11125 tcg_temp_free_i32(tmp2);
396e467c
FN
11126 }
11127 } else {
7d1b0095
PM
11128 tcg_temp_free_i32(tmp);
11129 tcg_temp_free_i32(tmp2);
99c475ab
FB
11130 }
11131 break;
11132
11133 case 5:
11134 /* load/store register offset. */
11135 rd = insn & 7;
11136 rn = (insn >> 3) & 7;
11137 rm = (insn >> 6) & 7;
11138 op = (insn >> 9) & 7;
b0109805 11139 addr = load_reg(s, rn);
b26eefb6 11140 tmp = load_reg(s, rm);
b0109805 11141 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11142 tcg_temp_free_i32(tmp);
99c475ab 11143
c40c8556 11144 if (op < 3) { /* store */
b0109805 11145 tmp = load_reg(s, rd);
c40c8556
PM
11146 } else {
11147 tmp = tcg_temp_new_i32();
11148 }
99c475ab
FB
11149
11150 switch (op) {
11151 case 0: /* str */
12dcc321 11152 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11153 break;
11154 case 1: /* strh */
12dcc321 11155 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11156 break;
11157 case 2: /* strb */
12dcc321 11158 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11159 break;
11160 case 3: /* ldrsb */
12dcc321 11161 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11162 break;
11163 case 4: /* ldr */
12dcc321 11164 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11165 break;
11166 case 5: /* ldrh */
12dcc321 11167 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11168 break;
11169 case 6: /* ldrb */
12dcc321 11170 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11171 break;
11172 case 7: /* ldrsh */
12dcc321 11173 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11174 break;
11175 }
c40c8556 11176 if (op >= 3) { /* load */
b0109805 11177 store_reg(s, rd, tmp);
c40c8556
PM
11178 } else {
11179 tcg_temp_free_i32(tmp);
11180 }
7d1b0095 11181 tcg_temp_free_i32(addr);
99c475ab
FB
11182 break;
11183
11184 case 6:
11185 /* load/store word immediate offset */
11186 rd = insn & 7;
11187 rn = (insn >> 3) & 7;
b0109805 11188 addr = load_reg(s, rn);
99c475ab 11189 val = (insn >> 4) & 0x7c;
b0109805 11190 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11191
11192 if (insn & (1 << 11)) {
11193 /* load */
c40c8556 11194 tmp = tcg_temp_new_i32();
12dcc321 11195 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11196 store_reg(s, rd, tmp);
99c475ab
FB
11197 } else {
11198 /* store */
b0109805 11199 tmp = load_reg(s, rd);
12dcc321 11200 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11201 tcg_temp_free_i32(tmp);
99c475ab 11202 }
7d1b0095 11203 tcg_temp_free_i32(addr);
99c475ab
FB
11204 break;
11205
11206 case 7:
11207 /* load/store byte immediate offset */
11208 rd = insn & 7;
11209 rn = (insn >> 3) & 7;
b0109805 11210 addr = load_reg(s, rn);
99c475ab 11211 val = (insn >> 6) & 0x1f;
b0109805 11212 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11213
11214 if (insn & (1 << 11)) {
11215 /* load */
c40c8556 11216 tmp = tcg_temp_new_i32();
12dcc321 11217 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
b0109805 11218 store_reg(s, rd, tmp);
99c475ab
FB
11219 } else {
11220 /* store */
b0109805 11221 tmp = load_reg(s, rd);
12dcc321 11222 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
c40c8556 11223 tcg_temp_free_i32(tmp);
99c475ab 11224 }
7d1b0095 11225 tcg_temp_free_i32(addr);
99c475ab
FB
11226 break;
11227
11228 case 8:
11229 /* load/store halfword immediate offset */
11230 rd = insn & 7;
11231 rn = (insn >> 3) & 7;
b0109805 11232 addr = load_reg(s, rn);
99c475ab 11233 val = (insn >> 5) & 0x3e;
b0109805 11234 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11235
11236 if (insn & (1 << 11)) {
11237 /* load */
c40c8556 11238 tmp = tcg_temp_new_i32();
12dcc321 11239 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
b0109805 11240 store_reg(s, rd, tmp);
99c475ab
FB
11241 } else {
11242 /* store */
b0109805 11243 tmp = load_reg(s, rd);
12dcc321 11244 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
c40c8556 11245 tcg_temp_free_i32(tmp);
99c475ab 11246 }
7d1b0095 11247 tcg_temp_free_i32(addr);
99c475ab
FB
11248 break;
11249
11250 case 9:
11251 /* load/store from stack */
11252 rd = (insn >> 8) & 7;
b0109805 11253 addr = load_reg(s, 13);
99c475ab 11254 val = (insn & 0xff) * 4;
b0109805 11255 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11256
11257 if (insn & (1 << 11)) {
11258 /* load */
c40c8556 11259 tmp = tcg_temp_new_i32();
12dcc321 11260 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11261 store_reg(s, rd, tmp);
99c475ab
FB
11262 } else {
11263 /* store */
b0109805 11264 tmp = load_reg(s, rd);
12dcc321 11265 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11266 tcg_temp_free_i32(tmp);
99c475ab 11267 }
7d1b0095 11268 tcg_temp_free_i32(addr);
99c475ab
FB
11269 break;
11270
11271 case 10:
11272 /* add to high reg */
11273 rd = (insn >> 8) & 7;
5899f386
FB
11274 if (insn & (1 << 11)) {
11275 /* SP */
5e3f878a 11276 tmp = load_reg(s, 13);
5899f386
FB
11277 } else {
11278 /* PC. bit 1 is ignored. */
7d1b0095 11279 tmp = tcg_temp_new_i32();
5e3f878a 11280 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11281 }
99c475ab 11282 val = (insn & 0xff) * 4;
5e3f878a
PB
11283 tcg_gen_addi_i32(tmp, tmp, val);
11284 store_reg(s, rd, tmp);
99c475ab
FB
11285 break;
11286
11287 case 11:
11288 /* misc */
11289 op = (insn >> 8) & 0xf;
11290 switch (op) {
11291 case 0:
11292 /* adjust stack pointer */
b26eefb6 11293 tmp = load_reg(s, 13);
99c475ab
FB
11294 val = (insn & 0x7f) * 4;
11295 if (insn & (1 << 7))
6a0d8a1d 11296 val = -(int32_t)val;
b26eefb6
PB
11297 tcg_gen_addi_i32(tmp, tmp, val);
11298 store_reg(s, 13, tmp);
99c475ab
FB
11299 break;
11300
9ee6e8bb
PB
11301 case 2: /* sign/zero extend. */
11302 ARCH(6);
11303 rd = insn & 7;
11304 rm = (insn >> 3) & 7;
b0109805 11305 tmp = load_reg(s, rm);
9ee6e8bb 11306 switch ((insn >> 6) & 3) {
b0109805
PB
11307 case 0: gen_sxth(tmp); break;
11308 case 1: gen_sxtb(tmp); break;
11309 case 2: gen_uxth(tmp); break;
11310 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11311 }
b0109805 11312 store_reg(s, rd, tmp);
9ee6e8bb 11313 break;
99c475ab
FB
11314 case 4: case 5: case 0xc: case 0xd:
11315 /* push/pop */
b0109805 11316 addr = load_reg(s, 13);
5899f386
FB
11317 if (insn & (1 << 8))
11318 offset = 4;
99c475ab 11319 else
5899f386
FB
11320 offset = 0;
11321 for (i = 0; i < 8; i++) {
11322 if (insn & (1 << i))
11323 offset += 4;
11324 }
11325 if ((insn & (1 << 11)) == 0) {
b0109805 11326 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11327 }
99c475ab
FB
11328 for (i = 0; i < 8; i++) {
11329 if (insn & (1 << i)) {
11330 if (insn & (1 << 11)) {
11331 /* pop */
c40c8556 11332 tmp = tcg_temp_new_i32();
12dcc321 11333 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11334 store_reg(s, i, tmp);
99c475ab
FB
11335 } else {
11336 /* push */
b0109805 11337 tmp = load_reg(s, i);
12dcc321 11338 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11339 tcg_temp_free_i32(tmp);
99c475ab 11340 }
5899f386 11341 /* advance to the next address. */
b0109805 11342 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11343 }
11344 }
39d5492a 11345 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11346 if (insn & (1 << 8)) {
11347 if (insn & (1 << 11)) {
11348 /* pop pc */
c40c8556 11349 tmp = tcg_temp_new_i32();
12dcc321 11350 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11351 /* don't set the pc until the rest of the instruction
11352 has completed */
11353 } else {
11354 /* push lr */
b0109805 11355 tmp = load_reg(s, 14);
12dcc321 11356 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11357 tcg_temp_free_i32(tmp);
99c475ab 11358 }
b0109805 11359 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11360 }
5899f386 11361 if ((insn & (1 << 11)) == 0) {
b0109805 11362 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11363 }
99c475ab 11364 /* write back the new stack pointer */
b0109805 11365 store_reg(s, 13, addr);
99c475ab 11366 /* set the new PC value */
be5e7a76 11367 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11368 store_reg_from_load(s, 15, tmp);
be5e7a76 11369 }
99c475ab
FB
11370 break;
11371
9ee6e8bb
PB
11372 case 1: case 3: case 9: case 11: /* czb */
11373 rm = insn & 7;
d9ba4830 11374 tmp = load_reg(s, rm);
9ee6e8bb
PB
11375 s->condlabel = gen_new_label();
11376 s->condjmp = 1;
11377 if (insn & (1 << 11))
cb63669a 11378 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11379 else
cb63669a 11380 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11381 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11382 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11383 val = (uint32_t)s->pc + 2;
11384 val += offset;
11385 gen_jmp(s, val);
11386 break;
11387
11388 case 15: /* IT, nop-hint. */
11389 if ((insn & 0xf) == 0) {
11390 gen_nop_hint(s, (insn >> 4) & 0xf);
11391 break;
11392 }
11393 /* If Then. */
11394 s->condexec_cond = (insn >> 4) & 0xe;
11395 s->condexec_mask = insn & 0x1f;
11396 /* No actual code generated for this insn, just setup state. */
11397 break;
11398
06c949e6 11399 case 0xe: /* bkpt */
d4a2dc67
PM
11400 {
11401 int imm8 = extract32(insn, 0, 8);
be5e7a76 11402 ARCH(5);
73710361
GB
11403 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11404 default_exception_el(s));
06c949e6 11405 break;
d4a2dc67 11406 }
06c949e6 11407
9ee6e8bb
PB
11408 case 0xa: /* rev */
11409 ARCH(6);
11410 rn = (insn >> 3) & 0x7;
11411 rd = insn & 0x7;
b0109805 11412 tmp = load_reg(s, rn);
9ee6e8bb 11413 switch ((insn >> 6) & 3) {
66896cb8 11414 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11415 case 1: gen_rev16(tmp); break;
11416 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
11417 default: goto illegal_op;
11418 }
b0109805 11419 store_reg(s, rd, tmp);
9ee6e8bb
PB
11420 break;
11421
d9e028c1
PM
11422 case 6:
11423 switch ((insn >> 5) & 7) {
11424 case 2:
11425 /* setend */
11426 ARCH(6);
9886ecdf
PB
11427 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11428 gen_helper_setend(cpu_env);
11429 s->is_jmp = DISAS_UPDATE;
d9e028c1 11430 }
9ee6e8bb 11431 break;
d9e028c1
PM
11432 case 3:
11433 /* cps */
11434 ARCH(6);
11435 if (IS_USER(s)) {
11436 break;
8984bd2e 11437 }
b53d8923 11438 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11439 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11440 /* FAULTMASK */
11441 if (insn & 1) {
11442 addr = tcg_const_i32(19);
11443 gen_helper_v7m_msr(cpu_env, addr, tmp);
11444 tcg_temp_free_i32(addr);
11445 }
11446 /* PRIMASK */
11447 if (insn & 2) {
11448 addr = tcg_const_i32(16);
11449 gen_helper_v7m_msr(cpu_env, addr, tmp);
11450 tcg_temp_free_i32(addr);
11451 }
11452 tcg_temp_free_i32(tmp);
11453 gen_lookup_tb(s);
11454 } else {
11455 if (insn & (1 << 4)) {
11456 shift = CPSR_A | CPSR_I | CPSR_F;
11457 } else {
11458 shift = 0;
11459 }
11460 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11461 }
d9e028c1
PM
11462 break;
11463 default:
11464 goto undef;
9ee6e8bb
PB
11465 }
11466 break;
11467
99c475ab
FB
11468 default:
11469 goto undef;
11470 }
11471 break;
11472
11473 case 12:
a7d3970d 11474 {
99c475ab 11475 /* load/store multiple */
39d5492a
PM
11476 TCGv_i32 loaded_var;
11477 TCGV_UNUSED_I32(loaded_var);
99c475ab 11478 rn = (insn >> 8) & 0x7;
b0109805 11479 addr = load_reg(s, rn);
99c475ab
FB
11480 for (i = 0; i < 8; i++) {
11481 if (insn & (1 << i)) {
99c475ab
FB
11482 if (insn & (1 << 11)) {
11483 /* load */
c40c8556 11484 tmp = tcg_temp_new_i32();
12dcc321 11485 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11486 if (i == rn) {
11487 loaded_var = tmp;
11488 } else {
11489 store_reg(s, i, tmp);
11490 }
99c475ab
FB
11491 } else {
11492 /* store */
b0109805 11493 tmp = load_reg(s, i);
12dcc321 11494 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11495 tcg_temp_free_i32(tmp);
99c475ab 11496 }
5899f386 11497 /* advance to the next address */
b0109805 11498 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11499 }
11500 }
b0109805 11501 if ((insn & (1 << rn)) == 0) {
a7d3970d 11502 /* base reg not in list: base register writeback */
b0109805
PB
11503 store_reg(s, rn, addr);
11504 } else {
a7d3970d
PM
11505 /* base reg in list: if load, complete it now */
11506 if (insn & (1 << 11)) {
11507 store_reg(s, rn, loaded_var);
11508 }
7d1b0095 11509 tcg_temp_free_i32(addr);
b0109805 11510 }
99c475ab 11511 break;
a7d3970d 11512 }
99c475ab
FB
11513 case 13:
11514 /* conditional branch or swi */
11515 cond = (insn >> 8) & 0xf;
11516 if (cond == 0xe)
11517 goto undef;
11518
11519 if (cond == 0xf) {
11520 /* swi */
eaed129d 11521 gen_set_pc_im(s, s->pc);
d4a2dc67 11522 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11523 s->is_jmp = DISAS_SWI;
99c475ab
FB
11524 break;
11525 }
11526 /* generate a conditional jump to next instruction */
e50e6a20 11527 s->condlabel = gen_new_label();
39fb730a 11528 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11529 s->condjmp = 1;
99c475ab
FB
11530
11531 /* jump to the offset */
5899f386 11532 val = (uint32_t)s->pc + 2;
99c475ab 11533 offset = ((int32_t)insn << 24) >> 24;
5899f386 11534 val += offset << 1;
8aaca4c0 11535 gen_jmp(s, val);
99c475ab
FB
11536 break;
11537
11538 case 14:
358bf29e 11539 if (insn & (1 << 11)) {
9ee6e8bb
PB
11540 if (disas_thumb2_insn(env, s, insn))
11541 goto undef32;
358bf29e
PB
11542 break;
11543 }
9ee6e8bb 11544 /* unconditional branch */
99c475ab
FB
11545 val = (uint32_t)s->pc;
11546 offset = ((int32_t)insn << 21) >> 21;
11547 val += (offset << 1) + 2;
8aaca4c0 11548 gen_jmp(s, val);
99c475ab
FB
11549 break;
11550
11551 case 15:
9ee6e8bb 11552 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11553 goto undef32;
9ee6e8bb 11554 break;
99c475ab
FB
11555 }
11556 return;
9ee6e8bb 11557undef32:
73710361
GB
11558 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11559 default_exception_el(s));
9ee6e8bb
PB
11560 return;
11561illegal_op:
99c475ab 11562undef:
73710361
GB
11563 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11564 default_exception_el(s));
99c475ab
FB
11565}
11566
541ebcd4
PM
11567static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11568{
11569 /* Return true if the insn at dc->pc might cross a page boundary.
11570 * (False positives are OK, false negatives are not.)
11571 */
11572 uint16_t insn;
11573
11574 if ((s->pc & 3) == 0) {
11575 /* At a 4-aligned address we can't be crossing a page */
11576 return false;
11577 }
11578
11579 /* This must be a Thumb insn */
f9fd40eb 11580 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11581
11582 if ((insn >> 11) >= 0x1d) {
11583 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11584 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11585 * end up actually treating this as two 16-bit insns (see the
11586 * code at the start of disas_thumb2_insn()) but we don't bother
11587 * to check for that as it is unlikely, and false positives here
11588 * are harmless.
11589 */
11590 return true;
11591 }
11592 /* Definitely a 16-bit insn, can't be crossing a page. */
11593 return false;
11594}
11595
20157705 11596/* generate intermediate code for basic block 'tb'. */
4e5e1215 11597void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11598{
4e5e1215 11599 ARMCPU *cpu = arm_env_get_cpu(env);
ed2803da 11600 CPUState *cs = CPU(cpu);
2c0262af 11601 DisasContext dc1, *dc = &dc1;
0fa85d43 11602 target_ulong pc_start;
0a2461fa 11603 target_ulong next_page_start;
2e70f6ef
PB
11604 int num_insns;
11605 int max_insns;
541ebcd4 11606 bool end_of_page;
3b46e624 11607
2c0262af 11608 /* generate intermediate code */
40f860cd
PM
11609
11610 /* The A64 decoder has its own top level loop, because it doesn't need
11611 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11612 */
11613 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
4e5e1215 11614 gen_intermediate_code_a64(cpu, tb);
40f860cd
PM
11615 return;
11616 }
11617
0fa85d43 11618 pc_start = tb->pc;
3b46e624 11619
2c0262af
FB
11620 dc->tb = tb;
11621
2c0262af
FB
11622 dc->is_jmp = DISAS_NEXT;
11623 dc->pc = pc_start;
ed2803da 11624 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 11625 dc->condjmp = 0;
3926cc84 11626
40f860cd 11627 dc->aarch64 = 0;
cef9ee70
SS
11628 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11629 * there is no secure EL1, so we route exceptions to EL3.
11630 */
11631 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11632 !arm_el_is_aa64(env, 3);
40f860cd 11633 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
f9fd40eb 11634 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
91cca2cd 11635 dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
40f860cd
PM
11636 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11637 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
c1e37810
PM
11638 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11639 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11640#if !defined(CONFIG_USER_ONLY)
c1e37810 11641 dc->user = (dc->current_el == 0);
3926cc84 11642#endif
3f342b9e 11643 dc->ns = ARM_TBFLAG_NS(tb->flags);
9dbbc748 11644 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
40f860cd
PM
11645 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11646 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11647 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
c0f4af17 11648 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
60322b39 11649 dc->cp_regs = cpu->cp_regs;
a984e42c 11650 dc->features = env->features;
40f860cd 11651
50225ad0
PM
11652 /* Single step state. The code-generation logic here is:
11653 * SS_ACTIVE == 0:
11654 * generate code with no special handling for single-stepping (except
11655 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11656 * this happens anyway because those changes are all system register or
11657 * PSTATE writes).
11658 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11659 * emit code for one insn
11660 * emit code to clear PSTATE.SS
11661 * emit code to generate software step exception for completed step
11662 * end TB (as usual for having generated an exception)
11663 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11664 * emit code to generate a software step exception
11665 * end the TB
11666 */
11667 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11668 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11669 dc->is_ldex = false;
11670 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11671
a7812ae4
PB
11672 cpu_F0s = tcg_temp_new_i32();
11673 cpu_F1s = tcg_temp_new_i32();
11674 cpu_F0d = tcg_temp_new_i64();
11675 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11676 cpu_V0 = cpu_F0d;
11677 cpu_V1 = cpu_F1d;
e677137d 11678 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11679 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 11680 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2e70f6ef
PB
11681 num_insns = 0;
11682 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 11683 if (max_insns == 0) {
2e70f6ef 11684 max_insns = CF_COUNT_MASK;
190ce7fb
RH
11685 }
11686 if (max_insns > TCG_MAX_INSNS) {
11687 max_insns = TCG_MAX_INSNS;
11688 }
2e70f6ef 11689
cd42d5b2 11690 gen_tb_start(tb);
e12ce78d 11691
3849902c
PM
11692 tcg_clear_temp_count();
11693
e12ce78d
PM
11694 /* A note on handling of the condexec (IT) bits:
11695 *
11696 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 11697 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 11698 * (1) if the condexec bits are not already zero then we write
0ecb72a5 11699 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
11700 * to do it at the end of the block. (For example if we don't do this
11701 * it's hard to identify whether we can safely skip writing condexec
11702 * at the end of the TB, which we definitely want to do for the case
11703 * where a TB doesn't do anything with the IT state at all.)
11704 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 11705 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
11706 * This is done both for leaving the TB at the end, and for leaving
11707 * it because of an exception we know will happen, which is done in
11708 * gen_exception_insn(). The latter is necessary because we need to
11709 * leave the TB with the PC/IT state just prior to execution of the
11710 * instruction which caused the exception.
11711 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 11712 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d 11713 * This is handled in the same way as restoration of the
4e5e1215
RH
11714 * PC in these situations; we save the value of the condexec bits
11715 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11716 * then uses this to restore them after an exception.
e12ce78d
PM
11717 *
11718 * Note that there are no instructions which can read the condexec
11719 * bits, and none which can write non-static values to them, so
0ecb72a5 11720 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
11721 * middle of a TB.
11722 */
11723
9ee6e8bb
PB
11724 /* Reset the conditional execution bits immediately. This avoids
11725 complications trying to do it at the end of the block. */
98eac7ca 11726 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 11727 {
39d5492a 11728 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 11729 tcg_gen_movi_i32(tmp, 0);
d9ba4830 11730 store_cpu_field(tmp, condexec_bits);
8f01245e 11731 }
2c0262af 11732 do {
52e971d9
RH
11733 tcg_gen_insn_start(dc->pc,
11734 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
b933066a
RH
11735 num_insns++;
11736
fbb4a2e3
PB
11737#ifdef CONFIG_USER_ONLY
11738 /* Intercept jump to the magic kernel page. */
40f860cd 11739 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
11740 /* We always get here via a jump, so know we are not in a
11741 conditional execution block. */
d4a2dc67 11742 gen_exception_internal(EXCP_KERNEL_TRAP);
577bf808 11743 dc->is_jmp = DISAS_EXC;
fbb4a2e3
PB
11744 break;
11745 }
11746#else
b53d8923 11747 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
9ee6e8bb
PB
11748 /* We always get here via a jump, so know we are not in a
11749 conditional execution block. */
d4a2dc67 11750 gen_exception_internal(EXCP_EXCEPTION_EXIT);
577bf808 11751 dc->is_jmp = DISAS_EXC;
d60bb01c 11752 break;
9ee6e8bb
PB
11753 }
11754#endif
11755
f0c3c505 11756 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
b933066a 11757 CPUBreakpoint *bp;
f0c3c505 11758 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11759 if (bp->pc == dc->pc) {
5d98bf8f 11760 if (bp->flags & BP_CPU) {
ce8a1b54 11761 gen_set_condexec(dc);
ed6c6448 11762 gen_set_pc_im(dc, dc->pc);
5d98bf8f
SF
11763 gen_helper_check_breakpoints(cpu_env);
11764 /* End the TB early; it's likely not going to be executed */
11765 dc->is_jmp = DISAS_UPDATE;
11766 } else {
11767 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
522a0d4e
RH
11768 /* The address covered by the breakpoint must be
11769 included in [tb->pc, tb->pc + tb->size) in order
11770 to for it to be properly cleared -- thus we
11771 increment the PC here so that the logic setting
11772 tb->size below does the right thing. */
5d98bf8f
SF
11773 /* TODO: Advance PC by correct instruction length to
11774 * avoid disassembler error messages */
11775 dc->pc += 2;
11776 goto done_generating;
11777 }
11778 break;
1fddef4b
FB
11779 }
11780 }
11781 }
e50e6a20 11782
959082fc 11783 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 11784 gen_io_start();
959082fc 11785 }
2e70f6ef 11786
50225ad0
PM
11787 if (dc->ss_active && !dc->pstate_ss) {
11788 /* Singlestep state is Active-pending.
11789 * If we're in this state at the start of a TB then either
11790 * a) we just took an exception to an EL which is being debugged
11791 * and this is the first insn in the exception handler
11792 * b) debug exceptions were masked and we just unmasked them
11793 * without changing EL (eg by clearing PSTATE.D)
11794 * In either case we're going to take a swstep exception in the
11795 * "did not step an insn" case, and so the syndrome ISV and EX
11796 * bits should be zero.
11797 */
959082fc 11798 assert(num_insns == 1);
73710361
GB
11799 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11800 default_exception_el(dc));
50225ad0
PM
11801 goto done_generating;
11802 }
11803
40f860cd 11804 if (dc->thumb) {
9ee6e8bb
PB
11805 disas_thumb_insn(env, dc);
11806 if (dc->condexec_mask) {
11807 dc->condexec_cond = (dc->condexec_cond & 0xe)
11808 | ((dc->condexec_mask >> 4) & 1);
11809 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11810 if (dc->condexec_mask == 0) {
11811 dc->condexec_cond = 0;
11812 }
11813 }
11814 } else {
f9fd40eb 11815 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
f4df2210
PM
11816 dc->pc += 4;
11817 disas_arm_insn(dc, insn);
9ee6e8bb 11818 }
e50e6a20
FB
11819
11820 if (dc->condjmp && !dc->is_jmp) {
11821 gen_set_label(dc->condlabel);
11822 dc->condjmp = 0;
11823 }
3849902c
PM
11824
11825 if (tcg_check_temp_count()) {
0a2461fa
AG
11826 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11827 dc->pc);
3849902c
PM
11828 }
11829
aaf2d97d 11830 /* Translation stops when a conditional branch is encountered.
e50e6a20 11831 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11832 * Also stop translation when a page boundary is reached. This
bf20dc07 11833 * ensures prefetch aborts occur at the right place. */
541ebcd4
PM
11834
11835 /* We want to stop the TB if the next insn starts in a new page,
11836 * or if it spans between this page and the next. This means that
11837 * if we're looking at the last halfword in the page we need to
11838 * see if it's a 16-bit Thumb insn (which will fit in this TB)
11839 * or a 32-bit Thumb insn (which won't).
11840 * This is to avoid generating a silly TB with a single 16-bit insn
11841 * in it at the end of this page (which would execute correctly
11842 * but isn't very efficient).
11843 */
11844 end_of_page = (dc->pc >= next_page_start) ||
11845 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
11846
fe700adb 11847 } while (!dc->is_jmp && !tcg_op_buf_full() &&
ed2803da 11848 !cs->singlestep_enabled &&
1b530a6d 11849 !singlestep &&
50225ad0 11850 !dc->ss_active &&
541ebcd4 11851 !end_of_page &&
2e70f6ef
PB
11852 num_insns < max_insns);
11853
11854 if (tb->cflags & CF_LAST_IO) {
11855 if (dc->condjmp) {
11856 /* FIXME: This can theoretically happen with self-modifying
11857 code. */
a47dddd7 11858 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11859 }
11860 gen_io_end();
11861 }
9ee6e8bb 11862
b5ff1b31 11863 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11864 instruction was a conditional branch or trap, and the PC has
11865 already been written. */
50225ad0 11866 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
7999a5c8 11867 /* Unconditional and "condition passed" instruction codepath. */
9ee6e8bb 11868 gen_set_condexec(dc);
7999a5c8
SF
11869 switch (dc->is_jmp) {
11870 case DISAS_SWI:
50225ad0 11871 gen_ss_advance(dc);
73710361
GB
11872 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11873 default_exception_el(dc));
7999a5c8
SF
11874 break;
11875 case DISAS_HVC:
37e6456e 11876 gen_ss_advance(dc);
73710361 11877 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
11878 break;
11879 case DISAS_SMC:
37e6456e 11880 gen_ss_advance(dc);
73710361 11881 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
11882 break;
11883 case DISAS_NEXT:
11884 case DISAS_UPDATE:
11885 gen_set_pc_im(dc, dc->pc);
11886 /* fall through */
11887 default:
11888 if (dc->ss_active) {
11889 gen_step_complete_exception(dc);
11890 } else {
11891 /* FIXME: Single stepping a WFI insn will not halt
11892 the CPU. */
11893 gen_exception_internal(EXCP_DEBUG);
11894 }
11895 }
11896 if (dc->condjmp) {
11897 /* "Condition failed" instruction codepath. */
11898 gen_set_label(dc->condlabel);
11899 gen_set_condexec(dc);
11900 gen_set_pc_im(dc, dc->pc);
11901 if (dc->ss_active) {
11902 gen_step_complete_exception(dc);
11903 } else {
11904 gen_exception_internal(EXCP_DEBUG);
11905 }
9ee6e8bb 11906 }
8aaca4c0 11907 } else {
9ee6e8bb
PB
11908 /* While branches must always occur at the end of an IT block,
11909 there are a few other things that can cause us to terminate
65626741 11910 the TB in the middle of an IT block:
9ee6e8bb
PB
11911 - Exception generating instructions (bkpt, swi, undefined).
11912 - Page boundaries.
11913 - Hardware watchpoints.
11914 Hardware breakpoints have already been handled and skip this code.
11915 */
11916 gen_set_condexec(dc);
8aaca4c0 11917 switch(dc->is_jmp) {
8aaca4c0 11918 case DISAS_NEXT:
6e256c93 11919 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 11920 break;
8aaca4c0 11921 case DISAS_UPDATE:
577bf808
SF
11922 gen_set_pc_im(dc, dc->pc);
11923 /* fall through */
11924 case DISAS_JUMP:
11925 default:
8aaca4c0 11926 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11927 tcg_gen_exit_tb(0);
8aaca4c0
FB
11928 break;
11929 case DISAS_TB_JUMP:
11930 /* nothing more to generate */
11931 break;
9ee6e8bb 11932 case DISAS_WFI:
1ce94f81 11933 gen_helper_wfi(cpu_env);
84549b6d
PM
11934 /* The helper doesn't necessarily throw an exception, but we
11935 * must go back to the main loop to check for interrupts anyway.
11936 */
11937 tcg_gen_exit_tb(0);
9ee6e8bb 11938 break;
72c1d3af
PM
11939 case DISAS_WFE:
11940 gen_helper_wfe(cpu_env);
11941 break;
c87e5a61
PM
11942 case DISAS_YIELD:
11943 gen_helper_yield(cpu_env);
11944 break;
9ee6e8bb 11945 case DISAS_SWI:
73710361
GB
11946 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11947 default_exception_el(dc));
9ee6e8bb 11948 break;
37e6456e 11949 case DISAS_HVC:
73710361 11950 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
11951 break;
11952 case DISAS_SMC:
73710361 11953 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 11954 break;
8aaca4c0 11955 }
e50e6a20
FB
11956 if (dc->condjmp) {
11957 gen_set_label(dc->condlabel);
9ee6e8bb 11958 gen_set_condexec(dc);
6e256c93 11959 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11960 dc->condjmp = 0;
11961 }
2c0262af 11962 }
2e70f6ef 11963
9ee6e8bb 11964done_generating:
806f352d 11965 gen_tb_end(tb, num_insns);
2c0262af
FB
11966
11967#ifdef DEBUG_DISAS
06486077
AB
11968 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
11969 qemu_log_in_addr_range(pc_start)) {
93fcfe39
AL
11970 qemu_log("----------------\n");
11971 qemu_log("IN: %s\n", lookup_symbol(pc_start));
d49190c4 11972 log_target_disas(cs, pc_start, dc->pc - pc_start,
f9fd40eb 11973 dc->thumb | (dc->sctlr_b << 1));
93fcfe39 11974 qemu_log("\n");
2c0262af
FB
11975 }
11976#endif
4e5e1215
RH
11977 tb->size = dc->pc - pc_start;
11978 tb->icount = num_insns;
2c0262af
FB
11979}
11980
b5ff1b31 11981static const char *cpu_mode_names[16] = {
28c9457d
EI
11982 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11983 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 11984};
9ee6e8bb 11985
878096ee
AF
/* Dump the AArch32 CPU state (R0..R15, PSR flags, security state, mode
 * and optionally the VFP registers) to @f via @cpu_fprintf.  For a CPU
 * currently in AArch64 state this delegates to aarch64_cpu_dump_state().
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;
    uint32_t psr;
    const char *ns_status;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* Core registers, four per output line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);

    /* Show NS/S security state when EL3 exists; Monitor mode is always
     * Secure, so no annotation is printed for it.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) &&
        (psr & CPSR_M) != ARM_CPU_MODE_MON) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }

    /* NZCV flags, T(humb)/A(RM) state, security state, mode name and
     * 32/26 depending on bit 4 of the mode field.
     */
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                ns_status,
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

    if (flags & CPU_DUMP_FPU) {
        /* 16 double registers with VFP, 32 with VFP3; each printed both
         * as a d-reg and as its two component s-regs.
         */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12044
bad729e2
RH
12045void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12046 target_ulong *data)
d2856f1a 12047{
3926cc84 12048 if (is_a64(env)) {
bad729e2 12049 env->pc = data[0];
40f860cd 12050 env->condexec_bits = 0;
3926cc84 12051 } else {
bad729e2
RH
12052 env->regs[15] = data[0];
12053 env->condexec_bits = data[1];
3926cc84 12054 }
d2856f1a 12055}