]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: Correct misleading 'is_thumb' syn_* parameter names
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
57fec1fe 26#include "tcg-op.h"
1de7afc9 27#include "qemu/log.h"
534df156 28#include "qemu/bitops.h"
1d854765 29#include "arm_ldst.h"
1497c961 30
2ef6175a
RH
31#include "exec/helper-proto.h"
32#include "exec/helper-gen.h"
2c0262af 33
a7e30d84 34#include "trace-tcg.h"
508127e2 35#include "exec/log.h"
a7e30d84
LV
36
37
2b51668f
PM
38#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
39#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 40/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 41#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 42#define ENABLE_ARCH_5J 0
2b51668f
PM
43#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
44#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
45#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
46#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
47#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 48
86753403 49#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 50
f570c61e 51#include "translate.h"
e12ce78d 52
b5ff1b31
FB
53#if defined(CONFIG_USER_ONLY)
54#define IS_USER(s) 1
55#else
56#define IS_USER(s) (s->user)
57#endif
58
3407ad0e 59TCGv_ptr cpu_env;
ad69471c 60/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 61static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 62static TCGv_i32 cpu_R[16];
78bcaa3e
RH
63TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
64TCGv_i64 cpu_exclusive_addr;
65TCGv_i64 cpu_exclusive_val;
426f5abc 66#ifdef CONFIG_USER_ONLY
78bcaa3e
RH
67TCGv_i64 cpu_exclusive_test;
68TCGv_i32 cpu_exclusive_info;
426f5abc 69#endif
ad69471c 70
b26eefb6 71/* FIXME: These should be removed. */
39d5492a 72static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 73static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 74
022c62cb 75#include "exec/gen-icount.h"
2e70f6ef 76
155c3eac
FN
77static const char *regnames[] =
78 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
79 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
80
/* initialize TCG globals.  Called once at startup; registers the CPU
 * state fields (core registers, NZCV flags, exclusive-access state)
 * as TCG global memory variables, then defers to the AArch64 side.
 */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* The 16 core registers live in env->regs[]. */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* Condition flags are kept in separate fields (see cpu.h). */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    /* User-mode emulation tracks extra exclusive state for cmpxchg. */
    cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    a64_translate_init();
}
111
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;   /* non-secure EL0 */
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;     /* secure EL0 */
    case ARMMMUIdx_S2NS:            /* stage-2 only: never a CPU mmu_idx here */
    default:
        g_assert_not_reached();
    }
}
133
39d5492a 134static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 135{
39d5492a 136 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
0ecb72a5 141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 142
/* Store var into the CPU state field at the given byte offset within
 * CPUARMState.  var must be a temporary; it is freed (marked dead) here.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}
148
149#define store_cpu_field(var, name) \
0ecb72a5 150 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 151
b26eefb6 152/* Set a variable to the value of a CPU register. */
39d5492a 153static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
154{
155 if (reg == 15) {
156 uint32_t addr;
b90372ad 157 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
155c3eac 164 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 169static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 170{
39d5492a 171 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* Bit 0 of the PC is never stored; a write to r15 is a branch,
         * so end the TB.
         */
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
187
b26eefb6 188/* Value extensions. */
86831435
PB
189#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
190#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
191#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
192#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
193
1497c961
PB
194#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
195#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 196
b26eefb6 197
39d5492a 198static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 199{
39d5492a 200 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 201 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
202 tcg_temp_free_i32(tmp_mask);
203}
d9ba4830
PB
204/* Set NZCV flags from the high 4 bits of var. */
205#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
206
d4a2dc67 207static void gen_exception_internal(int excp)
d9ba4830 208{
d4a2dc67
PM
209 TCGv_i32 tcg_excp = tcg_const_i32(excp);
210
211 assert(excp_is_internal(excp));
212 gen_helper_exception_internal(cpu_env, tcg_excp);
213 tcg_temp_free_i32(tcg_excp);
214}
215
73710361 216static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
d4a2dc67
PM
217{
218 TCGv_i32 tcg_excp = tcg_const_i32(excp);
219 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
73710361 220 TCGv_i32 tcg_el = tcg_const_i32(target_el);
d4a2dc67 221
73710361
GB
222 gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
223 tcg_syn, tcg_el);
224
225 tcg_temp_free_i32(tcg_el);
d4a2dc67
PM
226 tcg_temp_free_i32(tcg_syn);
227 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
228}
229
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        /* Mirror the state change in the translator's own copy so that
         * subsequent insns in this TB see the updated state.
         */
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
240
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
257
/* Dual signed 16x16->32 multiply: on return a = low16(a) * low16(b)
 * and b = high16(a) * high16(b) (all halves sign-extended).  Both
 * operands are updated in place.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    /* Low-half product must be computed first, before a and b are
     * shifted down to expose the high halves.
     */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
272
273/* Byteswap each halfword. */
39d5492a 274static void gen_rev16(TCGv_i32 var)
3670669c 275{
39d5492a 276 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_andi_i32(var, var, 0xff00ff00);
281 tcg_gen_or_i32(var, var, tmp);
7d1b0095 282 tcg_temp_free_i32(tmp);
3670669c
PB
283}
284
/* Byteswap low halfword and sign extend.  The initial zero-extension
 * is required because tcg_gen_bswap16_i32 expects the high bytes of
 * its input to be clear.
 */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
292
293/* Unsigned bitfield extract. */
39d5492a 294static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
295{
296 if (shift)
297 tcg_gen_shri_i32(var, var, shift);
298 tcg_gen_andi_i32(var, var, mask);
299}
300
/* Signed bitfield extract: extract a width-bit field starting at bit
 * `shift` and sign-extend it into var.
 */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend via the xor/sub trick: flip the sign bit, then
         * subtract it back out, propagating it upwards.
         */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
    /* If the field reaches bit 31 the arithmetic shift above already
     * sign-extended it.
     */
}
315
838fa72d 316/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 317static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 318{
838fa72d
AJ
319 TCGv_i64 tmp64 = tcg_temp_new_i64();
320
321 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 322 tcg_temp_free_i32(b);
838fa72d
AJ
323 tcg_gen_shli_i64(tmp64, tmp64, 32);
324 tcg_gen_add_i64(a, tmp64, a);
325
326 tcg_temp_free_i64(tmp64);
327 return a;
328}
329
330/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 331static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
332{
333 TCGv_i64 tmp64 = tcg_temp_new_i64();
334
335 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 336 tcg_temp_free_i32(b);
838fa72d
AJ
337 tcg_gen_shli_i64(tmp64, tmp64, 32);
338 tcg_gen_sub_i64(a, tmp64, a);
339
340 tcg_temp_free_i64(tmp64);
341 return a;
3670669c
PB
342}
343
5e3f878a 344/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 345static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 346{
39d5492a
PM
347 TCGv_i32 lo = tcg_temp_new_i32();
348 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 349 TCGv_i64 ret;
5e3f878a 350
831d7fe8 351 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 352 tcg_temp_free_i32(a);
7d1b0095 353 tcg_temp_free_i32(b);
831d7fe8
RH
354
355 ret = tcg_temp_new_i64();
356 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
357 tcg_temp_free_i32(lo);
358 tcg_temp_free_i32(hi);
831d7fe8
RH
359
360 return ret;
5e3f878a
PB
361}
362
39d5492a 363static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 364{
39d5492a
PM
365 TCGv_i32 lo = tcg_temp_new_i32();
366 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 367 TCGv_i64 ret;
5e3f878a 368
831d7fe8 369 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 370 tcg_temp_free_i32(a);
7d1b0095 371 tcg_temp_free_i32(b);
831d7fe8
RH
372
373 ret = tcg_temp_new_i64();
374 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
375 tcg_temp_free_i32(lo);
376 tcg_temp_free_i32(hi);
831d7fe8
RH
377
378 return ret;
5e3f878a
PB
379}
380
8f01245e 381/* Swap low and high halfwords. */
39d5492a 382static void gen_swap_half(TCGv_i32 var)
8f01245e 383{
39d5492a 384 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
385 tcg_gen_shri_i32(tmp, var, 16);
386 tcg_gen_shli_i32(var, var, 16);
387 tcg_gen_or_i32(var, var, tmp);
7d1b0095 388 tcg_temp_free_i32(tmp);
8f01245e
PB
389}
390
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;

   Clearing bit 15 of both halves before the 32-bit add prevents a
   carry from the low halfword spilling into the high halfword; the
   xor restores the correct bit-15 values of each half-sum.
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
410
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
416
/* Set N and Z flags from var.  NF holds the sign bit in bit 31;
 * ZF is "zero iff result was zero", so both are just copies.
 */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
423
/* T0 += T1 + CF.  (Flags are not updated; CF holds 0 or 1.) */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
430
/* dest = T0 + T1 + CF.  (Flags are not updated.) */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
437
/* dest = T0 - T1 + CF - 1, i.e. subtract-with-borrow where CF holds
 * the ARM carry (1 = no borrow).  Flags are not updated.
 */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
445
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    /* Double-word add: NF gets the 32-bit sum, CF the carry-out. */
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = sign changed relative to t0, but only when t0 and t1 had
     * the same sign: (res ^ t0) & ~(t0 ^ t1), bit 31.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
459
/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained double-word adds: first t0 + CF, then + t1,
         * accumulating the carry-out in CF each time.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the whole sum in 64 bits and split out the
         * low word (NF) and the carry (CF).
         */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V: same rule as gen_add_CC — overflow iff operands agreed in
     * sign and the result's sign differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
487
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry = NOT borrow, i.e. set when t0 >= t1. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V: overflow iff operands differ in sign and the result's sign
     * differs from t0: (res ^ t0) & (t0 ^ t1), bit 31.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
502
e77f0832 503/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 504static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 505{
39d5492a 506 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
507 tcg_gen_not_i32(tmp, t1);
508 gen_adc_CC(dest, t0, tmp);
39d5492a 509 tcg_temp_free_i32(tmp);
2de68a49
RH
510}
511
/* Emit gen_shl()/gen_shr(): shift-by-register where only the low byte
 * of t1 is the shift amount and counts >= 32 yield zero (per the ARM
 * shift-by-register semantics).  The movcond selects 0 when the count
 * exceeds 31, and the count is masked to 5 bits for the TCG op, whose
 * behaviour is undefined for out-of-range counts.
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
530
39d5492a 531static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 532{
39d5492a 533 TCGv_i32 tmp1, tmp2;
365af80e
AJ
534 tmp1 = tcg_temp_new_i32();
535 tcg_gen_andi_i32(tmp1, t1, 0xff);
536 tmp2 = tcg_const_i32(0x1f);
537 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
538 tcg_temp_free_i32(tmp2);
539 tcg_gen_sar_i32(dest, t0, tmp1);
540 tcg_temp_free_i32(tmp1);
541}
542
39d5492a 543static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 544{
39d5492a
PM
545 TCGv_i32 c0 = tcg_const_i32(0);
546 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
547 tcg_gen_neg_i32(tmp, src);
548 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
549 tcg_temp_free_i32(c0);
550 tcg_temp_free_i32(tmp);
551}
ad69471c 552
/* Set CF to bit `shift` of var, i.e. the last bit shifted out by the
 * immediate shifter.  shift == 0 means bit 0; for shift == 31 the
 * masking is unnecessary since only bit 0 survives the shift.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 564
9a119ff6 565/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
566static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
567 int shift, int flags)
9a119ff6
PB
568{
569 switch (shiftop) {
570 case 0: /* LSL */
571 if (shift != 0) {
572 if (flags)
573 shifter_out_im(var, 32 - shift);
574 tcg_gen_shli_i32(var, var, shift);
575 }
576 break;
577 case 1: /* LSR */
578 if (shift == 0) {
579 if (flags) {
66c374de 580 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
581 }
582 tcg_gen_movi_i32(var, 0);
583 } else {
584 if (flags)
585 shifter_out_im(var, shift - 1);
586 tcg_gen_shri_i32(var, var, shift);
587 }
588 break;
589 case 2: /* ASR */
590 if (shift == 0)
591 shift = 32;
592 if (flags)
593 shifter_out_im(var, shift - 1);
594 if (shift == 32)
595 shift = 31;
596 tcg_gen_sari_i32(var, var, shift);
597 break;
598 case 3: /* ROR/RRX */
599 if (shift != 0) {
600 if (flags)
601 shifter_out_im(var, shift - 1);
f669df27 602 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 603 } else {
39d5492a 604 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 605 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
606 if (flags)
607 shifter_out_im(var, 0);
608 tcg_gen_shri_i32(var, var, 1);
b26eefb6 609 tcg_gen_or_i32(var, var, tmp);
7d1b0095 610 tcg_temp_free_i32(tmp);
b26eefb6
PB
611 }
612 }
613};
614
/* Shift var by the register value `shift`, in place.  If `flags` is
 * set, use the flag-setting helpers (which also update CF); otherwise
 * emit inline TCG using the GEN_SHIFT/gen_sar wrappers above.  The
 * `shift` temporary is consumed.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
642
/* Dispatch table for the ARM-encoding parallel add/subtract insns.
 * op2 selects the operation; gen_pas_helper is redefined below so the
 * same table serves both the GE-flag-writing (signed/unsigned) and
 * saturating/halving variants, which take different helper arguments.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
    /* Variants writing the GE flags get a pointer to env->GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:     /* signed, sets GE */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:     /* unsigned, sets GE */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:     /* signed saturating */
        PAS_OP(q);
        break;
    case 3:     /* signed halving */
        PAS_OP(sh);
        break;
    case 6:     /* unsigned saturating */
        PAS_OP(uq);
        break;
    case 7:     /* unsigned halving */
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
688
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 * Same structure as gen_arm_parallel_addsub above, but here op1 selects
 * the operation and op2 selects the variant.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
    /* Variants writing the GE flags get a pointer to env->GE. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:     /* signed, sets GE */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:     /* unsigned, sets GE */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:     /* signed saturating */
        PAS_OP(q);
        break;
    case 2:     /* signed halving */
        PAS_OP(sh);
        break;
    case 5:     /* unsigned saturating */
        PAS_OP(uq);
        break;
    case 6:     /* unsigned halving */
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
735
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 *
 * Fills in *cmp with a TCG condition and value such that the ARM
 * condition holds iff (cond, value, 0) compares true.  For the odd
 * (negated) codes the condition is inverted at the end.  `global`
 * records whether `value` is a TCG global (must not be freed) or a
 * temporary allocated here (freed by arm_free_cc()).
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        /* ZF is non-zero for !Z, so "eq" is ZF == 0. */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        /* NF carries the sign in bit 31: negative means NF < 0. */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        /* VF also carries its flag in bit 31. */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions. */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0. */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result. */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value. */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
825
826void arm_free_cc(DisasCompare *cmp)
827{
828 if (!cmp->value_global) {
829 tcg_temp_free_i32(cmp->value);
830 }
831}
832
/* Branch to label if the prepared condition (from arm_test_cc) holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
837
/* Convenience wrapper: branch to label if ARM condition cc holds,
 * managing the DisasCompare lifetime internally.
 */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 845
/* Indexed by data-processing opcode: 1 if the op is logical (N and Z
 * set from the result, C from the shifter), 0 if arithmetic (flags
 * from the ALU).
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 864
/* Set PC and Thumb state from an immediate address.  Bit 0 of the
 * address selects the new Thumb state (interworking branch); the env
 * thumb field is only written when the state actually changes.
 */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
879
/* Set PC and Thumb state from var.  var is marked as dead.
 * Bit 0 of var becomes the new Thumb state; the remaining bits the PC.
 */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);    /* consumes var */
}
888
21aeb343
JR
889/* Variant of store_reg which uses branch&exchange logic when storing
890 to r15 in ARM architecture v7 and above. The source must be a temporary
891 and will be marked as dead. */
7dcc1f89 892static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
893{
894 if (reg == 15 && ENABLE_ARCH_7) {
895 gen_bx(s, var);
896 } else {
897 store_reg(s, reg, var);
898 }
899}
900
be5e7a76
DES
901/* Variant of store_reg which uses branch&exchange logic when storing
902 * to r15 in ARM architecture v5T and above. This is used for storing
903 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
904 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 905static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
906{
907 if (reg == 15 && ENABLE_ARCH_5) {
908 gen_bx(s, var);
909 } else {
910 store_reg(s, reg, var);
911 }
912}
913
08307563
PM
914/* Abstractions of "generate code to do a guest load/store for
915 * AArch32", where a vaddr is always 32 bits (and is zero
916 * extended if we're a 64 bit core) and data is also
917 * 32 bits unless specifically doing a 64 bit access.
918 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 919 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
920 */
921#if TARGET_LONG_BITS == 32
922
09f78135
RH
923#define DO_GEN_LD(SUFF, OPC) \
924static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 925{ \
30901475 926 tcg_gen_qemu_ld_i32(val, addr, index, (OPC)); \
08307563
PM
927}
928
09f78135
RH
929#define DO_GEN_ST(SUFF, OPC) \
930static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 931{ \
30901475 932 tcg_gen_qemu_st_i32(val, addr, index, (OPC)); \
08307563
PM
933}
934
935static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
936{
09f78135 937 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
938}
939
940static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
941{
09f78135 942 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
943}
944
945#else
946
09f78135
RH
947#define DO_GEN_LD(SUFF, OPC) \
948static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
949{ \
950 TCGv addr64 = tcg_temp_new(); \
08307563 951 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 952 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 953 tcg_temp_free(addr64); \
08307563
PM
954}
955
09f78135
RH
956#define DO_GEN_ST(SUFF, OPC) \
957static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
958{ \
959 TCGv addr64 = tcg_temp_new(); \
08307563 960 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 961 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 962 tcg_temp_free(addr64); \
08307563
PM
963}
964
/* 64-bit guest load, 64-bit-core variant: zero-extend the 32-bit
 * address into a target-long temporary before the access.
 */
static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}

/* 64-bit guest store; counterpart of gen_aa32_ld64 above. */
static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
    tcg_temp_free(addr64);
}
980
981#endif
982
09f78135
RH
/* Instantiate the gen_aa32_{ld,st}* accessors: suffix encodes width
 * (8/16/32/64) and signedness (s = sign-extend, u = zero-extend).
 */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_TESW)
DO_GEN_LD(16u, MO_TEUW)
DO_GEN_LD(32u, MO_TEUL)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_TEUW | MO_ALIGN)
DO_GEN_LD(32ua, MO_TEUL | MO_ALIGN)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_TEUW)
DO_GEN_ST(32, MO_TEUL)
08307563 994
eaed129d 995static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 996{
40f860cd 997 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
998}
999
37e6456e
PM
/* Generate an HVC (hypervisor call) with the given 16-bit immediate.
 * The PC is set twice: once before the pre-check helper (so a trap
 * reports the insn's own address) and once after (so the real
 * exception reports the following insn).
 */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}
1017
/* Generate an SMC (secure monitor call).  Same two-phase PC handling
 * as gen_hvc(): pre-check helper may raise a trap at this insn's
 * address; otherwise the exception is taken after the insn.
 */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1032
d4a2dc67
PM
1033static inline void
1034gen_set_condexec (DisasContext *s)
1035{
1036 if (s->condexec_mask) {
1037 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1038 TCGv_i32 tmp = tcg_temp_new_i32();
1039 tcg_gen_movi_i32(tmp, val);
1040 store_cpu_field(tmp, condexec_bits);
1041 }
1042}
1043
/* Raise a QEMU-internal exception (no architectural syndrome) at the
 * current insn; offset is how far s->pc has advanced past that insn.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}
1051
73710361
GB
/* Raise an architectural exception at the current insn, with the given
 * syndrome value and target exception level.  offset is how far s->pc
 * has advanced past the faulting insn.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}
1060
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* NOTE(review): & ~1 clears bit 0 of the stored PC — presumably the
     * Thumb bit, which lives in the flags rather than in r15; confirm. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1067
b0109805 1068static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1069 TCGv_i32 var)
2c0262af 1070{
1e8d4eec 1071 int val, rm, shift, shiftop;
39d5492a 1072 TCGv_i32 offset;
2c0262af
FB
1073
1074 if (!(insn & (1 << 25))) {
1075 /* immediate */
1076 val = insn & 0xfff;
1077 if (!(insn & (1 << 23)))
1078 val = -val;
537730b9 1079 if (val != 0)
b0109805 1080 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1081 } else {
1082 /* shift/register */
1083 rm = (insn) & 0xf;
1084 shift = (insn >> 7) & 0x1f;
1e8d4eec 1085 shiftop = (insn >> 5) & 3;
b26eefb6 1086 offset = load_reg(s, rm);
9a119ff6 1087 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1088 if (!(insn & (1 << 23)))
b0109805 1089 tcg_gen_sub_i32(var, var, offset);
2c0262af 1090 else
b0109805 1091 tcg_gen_add_i32(var, var, offset);
7d1b0095 1092 tcg_temp_free_i32(offset);
2c0262af
FB
1093 }
1094}
1095
191f9a93 1096static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1097 int extra, TCGv_i32 var)
2c0262af
FB
1098{
1099 int val, rm;
39d5492a 1100 TCGv_i32 offset;
3b46e624 1101
2c0262af
FB
1102 if (insn & (1 << 22)) {
1103 /* immediate */
1104 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1105 if (!(insn & (1 << 23)))
1106 val = -val;
18acad92 1107 val += extra;
537730b9 1108 if (val != 0)
b0109805 1109 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1110 } else {
1111 /* register */
191f9a93 1112 if (extra)
b0109805 1113 tcg_gen_addi_i32(var, var, extra);
2c0262af 1114 rm = (insn) & 0xf;
b26eefb6 1115 offset = load_reg(s, rm);
2c0262af 1116 if (!(insn & (1 << 23)))
b0109805 1117 tcg_gen_sub_i32(var, var, offset);
2c0262af 1118 else
b0109805 1119 tcg_gen_add_i32(var, var, offset);
7d1b0095 1120 tcg_temp_free_i32(offset);
2c0262af
FB
1121 }
1122}
1123
5aaebd13
PM
1124static TCGv_ptr get_fpstatus_ptr(int neon)
1125{
1126 TCGv_ptr statusptr = tcg_temp_new_ptr();
1127 int offset;
1128 if (neon) {
0ecb72a5 1129 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1130 } else {
0ecb72a5 1131 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1132 }
1133 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1134 return statusptr;
1135}
1136
4373f3ce
PB
/* Two-operand VFP arithmetic: defines gen_vfp_NAME(dp) computing
 * F0 = F0 op F1 in the selected precision, using the VFP fp_status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1155
605a6aed
PM
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1167
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1177
4373f3ce
PB
1178static inline void gen_vfp_abs(int dp)
1179{
1180 if (dp)
1181 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1182 else
1183 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1184}
1185
1186static inline void gen_vfp_neg(int dp)
1187{
1188 if (dp)
1189 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1190 else
1191 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1192}
1193
1194static inline void gen_vfp_sqrt(int dp)
1195{
1196 if (dp)
1197 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1198 else
1199 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1200}
1201
1202static inline void gen_vfp_cmp(int dp)
1203{
1204 if (dp)
1205 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1206 else
1207 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1208}
1209
1210static inline void gen_vfp_cmpe(int dp)
1211{
1212 if (dp)
1213 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1214 else
1215 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1216}
1217
1218static inline void gen_vfp_F1_ld0(int dp)
1219{
1220 if (dp)
5b340b51 1221 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1222 else
5b340b51 1223 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1224}
1225
5500b06c
PM
/* Integer-to-float conversions: source is always the 32-bit F0s,
 * destination is F0 in the selected precision.  neon selects the
 * standard-FPSCR status (see get_fpstatus_ptr).
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1241
5500b06c
PM
/* Float-to-integer conversions: destination is always the 32-bit F0s,
 * source is F0 in the selected precision.  The 'z' variants below
 * truncate toward zero; the others use the current rounding mode.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1259
16d5b3ca 1260#define VFP_GEN_FIX(name, round) \
5500b06c 1261static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1262{ \
39d5492a 1263 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1264 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1265 if (dp) { \
16d5b3ca
WN
1266 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1267 statusptr); \
5500b06c 1268 } else { \
16d5b3ca
WN
1269 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1270 statusptr); \
5500b06c 1271 } \
b75263d6 1272 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1273 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1274}
16d5b3ca
WN
1275VFP_GEN_FIX(tosh, _round_to_zero)
1276VFP_GEN_FIX(tosl, _round_to_zero)
1277VFP_GEN_FIX(touh, _round_to_zero)
1278VFP_GEN_FIX(toul, _round_to_zero)
1279VFP_GEN_FIX(shto, )
1280VFP_GEN_FIX(slto, )
1281VFP_GEN_FIX(uhto, )
1282VFP_GEN_FIX(ulto, )
4373f3ce 1283#undef VFP_GEN_FIX
9ee6e8bb 1284
39d5492a 1285static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1286{
08307563 1287 if (dp) {
6ce2faf4 1288 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
08307563 1289 } else {
6ce2faf4 1290 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
08307563 1291 }
b5ff1b31
FB
1292}
1293
39d5492a 1294static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1295{
08307563 1296 if (dp) {
6ce2faf4 1297 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
08307563 1298 } else {
6ce2faf4 1299 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
08307563 1300 }
b5ff1b31
FB
1301}
1302
8e96005d
FB
1303static inline long
1304vfp_reg_offset (int dp, int reg)
1305{
1306 if (dp)
1307 return offsetof(CPUARMState, vfp.regs[reg]);
1308 else if (reg & 1) {
1309 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1310 + offsetof(CPU_DoubleU, l.upper);
1311 } else {
1312 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1313 + offsetof(CPU_DoubleU, l.lower);
1314 }
1315}
9ee6e8bb
PB
1316
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  Each NEON
   D register is addressed as two consecutive S registers. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1326
39d5492a 1327static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1328{
39d5492a 1329 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1330 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1331 return tmp;
1332}
1333
39d5492a 1334static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1335{
1336 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1337 tcg_temp_free_i32(var);
8f8e3aa4
PB
1338}
1339
a7812ae4 1340static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1341{
1342 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1343}
1344
a7812ae4 1345static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1346{
1347 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1348}
1349
4373f3ce
PB
/* TCG float load/stores are just the integer ops of the same width;
 * these aliases keep the VFP code below readable.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1354
b7bcbe95
FB
1355static inline void gen_mov_F0_vreg(int dp, int reg)
1356{
1357 if (dp)
4373f3ce 1358 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1359 else
4373f3ce 1360 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1361}
1362
1363static inline void gen_mov_F1_vreg(int dp, int reg)
1364{
1365 if (dp)
4373f3ce 1366 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1367 else
4373f3ce 1368 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1369}
1370
1371static inline void gen_mov_vreg_F0(int dp, int reg)
1372{
1373 if (dp)
4373f3ce 1374 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1375 else
4373f3ce 1376 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1377}
1378
18c9b560
AZ
1379#define ARM_CP_RW_BIT (1 << 20)
1380
a7812ae4 1381static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1382{
0ecb72a5 1383 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1384}
1385
a7812ae4 1386static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1387{
0ecb72a5 1388 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1389}
1390
39d5492a 1391static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1392{
39d5492a 1393 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1394 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1395 return var;
e677137d
PB
1396}
1397
39d5492a 1398static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1399{
0ecb72a5 1400 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1401 tcg_temp_free_i32(var);
e677137d
PB
1402}
1403
/* Move the M0 working register to/from iwMMXt register wRn, and the
 * bitwise two-operand ops computing M0 = M0 op wRn (via the V1 temp).
 */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1431
/* Generators for iwMMXt two-operand ops, M0 = helper(M0, wRn).
 * The _ENV variants pass cpu_env (helpers that touch CPU state);
 * _ENV_SIZE expands byte/word/long variants; _ENV1 is the
 * one-operand form M0 = helper(M0).
 */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1456
/* Multiply/accumulate and sum-of-absolute-differences ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Unpack (interleave) ops, per element size and signedness. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise compares. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract (wrap, unsigned- and signed-saturating). */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averaging ops. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

/* Pack (narrowing) ops. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1511
e677137d
PB
/* Set the "MMX register updated" bit (bit 1) in wCon. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set the "control register updated" bit (bit 0) in wCon. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Recompute the N and Z SIMD flags in wCASF from the M0 result. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1534
/* M0 += low 32 bits of wRn, zero-extended to 64 bits. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1541
39d5492a
PM
1542static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1543 TCGv_i32 dest)
18c9b560
AZ
1544{
1545 int rd;
1546 uint32_t offset;
39d5492a 1547 TCGv_i32 tmp;
18c9b560
AZ
1548
1549 rd = (insn >> 16) & 0xf;
da6b5335 1550 tmp = load_reg(s, rd);
18c9b560
AZ
1551
1552 offset = (insn & 0xff) << ((insn >> 7) & 2);
1553 if (insn & (1 << 24)) {
1554 /* Pre indexed */
1555 if (insn & (1 << 23))
da6b5335 1556 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1557 else
da6b5335
FN
1558 tcg_gen_addi_i32(tmp, tmp, -offset);
1559 tcg_gen_mov_i32(dest, tmp);
18c9b560 1560 if (insn & (1 << 21))
da6b5335
FN
1561 store_reg(s, rd, tmp);
1562 else
7d1b0095 1563 tcg_temp_free_i32(tmp);
18c9b560
AZ
1564 } else if (insn & (1 << 21)) {
1565 /* Post indexed */
da6b5335 1566 tcg_gen_mov_i32(dest, tmp);
18c9b560 1567 if (insn & (1 << 23))
da6b5335 1568 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1569 else
da6b5335
FN
1570 tcg_gen_addi_i32(tmp, tmp, -offset);
1571 store_reg(s, rd, tmp);
18c9b560
AZ
1572 } else if (!(insn & (1 << 23)))
1573 return 1;
1574 return 0;
1575}
1576
39d5492a 1577static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1578{
1579 int rd = (insn >> 0) & 0xf;
39d5492a 1580 TCGv_i32 tmp;
18c9b560 1581
da6b5335
FN
1582 if (insn & (1 << 8)) {
1583 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1584 return 1;
da6b5335
FN
1585 } else {
1586 tmp = iwmmxt_load_creg(rd);
1587 }
1588 } else {
7d1b0095 1589 tmp = tcg_temp_new_i32();
da6b5335 1590 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1591 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1592 }
1593 tcg_gen_andi_i32(tmp, tmp, mask);
1594 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1595 tcg_temp_free_i32(tmp);
18c9b560
AZ
1596 return 0;
1597}
1598
a1c7273b 1599/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1600 (ie. an undefined instruction). */
7dcc1f89 1601static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1602{
1603 int rd, wrd;
1604 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1605 TCGv_i32 addr;
1606 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1607
1608 if ((insn & 0x0e000e00) == 0x0c000000) {
1609 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1610 wrd = insn & 0xf;
1611 rdlo = (insn >> 12) & 0xf;
1612 rdhi = (insn >> 16) & 0xf;
1613 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1614 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1615 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1616 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1617 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1618 } else { /* TMCRR */
da6b5335
FN
1619 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1620 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1621 gen_op_iwmmxt_set_mup();
1622 }
1623 return 0;
1624 }
1625
1626 wrd = (insn >> 12) & 0xf;
7d1b0095 1627 addr = tcg_temp_new_i32();
da6b5335 1628 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1629 tcg_temp_free_i32(addr);
18c9b560 1630 return 1;
da6b5335 1631 }
18c9b560
AZ
1632 if (insn & ARM_CP_RW_BIT) {
1633 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1634 tmp = tcg_temp_new_i32();
6ce2faf4 1635 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1636 iwmmxt_store_creg(wrd, tmp);
18c9b560 1637 } else {
e677137d
PB
1638 i = 1;
1639 if (insn & (1 << 8)) {
1640 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1641 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1642 i = 0;
1643 } else { /* WLDRW wRd */
29531141 1644 tmp = tcg_temp_new_i32();
6ce2faf4 1645 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1646 }
1647 } else {
29531141 1648 tmp = tcg_temp_new_i32();
e677137d 1649 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1650 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1651 } else { /* WLDRB */
6ce2faf4 1652 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1653 }
1654 }
1655 if (i) {
1656 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1657 tcg_temp_free_i32(tmp);
e677137d 1658 }
18c9b560
AZ
1659 gen_op_iwmmxt_movq_wRn_M0(wrd);
1660 }
1661 } else {
1662 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1663 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1664 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1665 } else {
1666 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1667 tmp = tcg_temp_new_i32();
e677137d
PB
1668 if (insn & (1 << 8)) {
1669 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1670 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d 1671 } else { /* WSTRW wRd */
ecc7b3aa 1672 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
6ce2faf4 1673 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1674 }
1675 } else {
1676 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1677 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
6ce2faf4 1678 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d 1679 } else { /* WSTRB */
ecc7b3aa 1680 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
6ce2faf4 1681 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1682 }
1683 }
18c9b560 1684 }
29531141 1685 tcg_temp_free_i32(tmp);
18c9b560 1686 }
7d1b0095 1687 tcg_temp_free_i32(addr);
18c9b560
AZ
1688 return 0;
1689 }
1690
1691 if ((insn & 0x0f000000) != 0x0e000000)
1692 return 1;
1693
1694 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1695 case 0x000: /* WOR */
1696 wrd = (insn >> 12) & 0xf;
1697 rd0 = (insn >> 0) & 0xf;
1698 rd1 = (insn >> 16) & 0xf;
1699 gen_op_iwmmxt_movq_M0_wRn(rd0);
1700 gen_op_iwmmxt_orq_M0_wRn(rd1);
1701 gen_op_iwmmxt_setpsr_nz();
1702 gen_op_iwmmxt_movq_wRn_M0(wrd);
1703 gen_op_iwmmxt_set_mup();
1704 gen_op_iwmmxt_set_cup();
1705 break;
1706 case 0x011: /* TMCR */
1707 if (insn & 0xf)
1708 return 1;
1709 rd = (insn >> 12) & 0xf;
1710 wrd = (insn >> 16) & 0xf;
1711 switch (wrd) {
1712 case ARM_IWMMXT_wCID:
1713 case ARM_IWMMXT_wCASF:
1714 break;
1715 case ARM_IWMMXT_wCon:
1716 gen_op_iwmmxt_set_cup();
1717 /* Fall through. */
1718 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1719 tmp = iwmmxt_load_creg(wrd);
1720 tmp2 = load_reg(s, rd);
f669df27 1721 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1722 tcg_temp_free_i32(tmp2);
da6b5335 1723 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1724 break;
1725 case ARM_IWMMXT_wCGR0:
1726 case ARM_IWMMXT_wCGR1:
1727 case ARM_IWMMXT_wCGR2:
1728 case ARM_IWMMXT_wCGR3:
1729 gen_op_iwmmxt_set_cup();
da6b5335
FN
1730 tmp = load_reg(s, rd);
1731 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1732 break;
1733 default:
1734 return 1;
1735 }
1736 break;
1737 case 0x100: /* WXOR */
1738 wrd = (insn >> 12) & 0xf;
1739 rd0 = (insn >> 0) & 0xf;
1740 rd1 = (insn >> 16) & 0xf;
1741 gen_op_iwmmxt_movq_M0_wRn(rd0);
1742 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1743 gen_op_iwmmxt_setpsr_nz();
1744 gen_op_iwmmxt_movq_wRn_M0(wrd);
1745 gen_op_iwmmxt_set_mup();
1746 gen_op_iwmmxt_set_cup();
1747 break;
1748 case 0x111: /* TMRC */
1749 if (insn & 0xf)
1750 return 1;
1751 rd = (insn >> 12) & 0xf;
1752 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1753 tmp = iwmmxt_load_creg(wrd);
1754 store_reg(s, rd, tmp);
18c9b560
AZ
1755 break;
1756 case 0x300: /* WANDN */
1757 wrd = (insn >> 12) & 0xf;
1758 rd0 = (insn >> 0) & 0xf;
1759 rd1 = (insn >> 16) & 0xf;
1760 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1761 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1762 gen_op_iwmmxt_andq_M0_wRn(rd1);
1763 gen_op_iwmmxt_setpsr_nz();
1764 gen_op_iwmmxt_movq_wRn_M0(wrd);
1765 gen_op_iwmmxt_set_mup();
1766 gen_op_iwmmxt_set_cup();
1767 break;
1768 case 0x200: /* WAND */
1769 wrd = (insn >> 12) & 0xf;
1770 rd0 = (insn >> 0) & 0xf;
1771 rd1 = (insn >> 16) & 0xf;
1772 gen_op_iwmmxt_movq_M0_wRn(rd0);
1773 gen_op_iwmmxt_andq_M0_wRn(rd1);
1774 gen_op_iwmmxt_setpsr_nz();
1775 gen_op_iwmmxt_movq_wRn_M0(wrd);
1776 gen_op_iwmmxt_set_mup();
1777 gen_op_iwmmxt_set_cup();
1778 break;
1779 case 0x810: case 0xa10: /* WMADD */
1780 wrd = (insn >> 12) & 0xf;
1781 rd0 = (insn >> 0) & 0xf;
1782 rd1 = (insn >> 16) & 0xf;
1783 gen_op_iwmmxt_movq_M0_wRn(rd0);
1784 if (insn & (1 << 21))
1785 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1786 else
1787 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1788 gen_op_iwmmxt_movq_wRn_M0(wrd);
1789 gen_op_iwmmxt_set_mup();
1790 break;
1791 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1792 wrd = (insn >> 12) & 0xf;
1793 rd0 = (insn >> 16) & 0xf;
1794 rd1 = (insn >> 0) & 0xf;
1795 gen_op_iwmmxt_movq_M0_wRn(rd0);
1796 switch ((insn >> 22) & 3) {
1797 case 0:
1798 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1799 break;
1800 case 1:
1801 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1802 break;
1803 case 2:
1804 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1805 break;
1806 case 3:
1807 return 1;
1808 }
1809 gen_op_iwmmxt_movq_wRn_M0(wrd);
1810 gen_op_iwmmxt_set_mup();
1811 gen_op_iwmmxt_set_cup();
1812 break;
1813 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1814 wrd = (insn >> 12) & 0xf;
1815 rd0 = (insn >> 16) & 0xf;
1816 rd1 = (insn >> 0) & 0xf;
1817 gen_op_iwmmxt_movq_M0_wRn(rd0);
1818 switch ((insn >> 22) & 3) {
1819 case 0:
1820 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1821 break;
1822 case 1:
1823 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1824 break;
1825 case 2:
1826 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1827 break;
1828 case 3:
1829 return 1;
1830 }
1831 gen_op_iwmmxt_movq_wRn_M0(wrd);
1832 gen_op_iwmmxt_set_mup();
1833 gen_op_iwmmxt_set_cup();
1834 break;
1835 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1836 wrd = (insn >> 12) & 0xf;
1837 rd0 = (insn >> 16) & 0xf;
1838 rd1 = (insn >> 0) & 0xf;
1839 gen_op_iwmmxt_movq_M0_wRn(rd0);
1840 if (insn & (1 << 22))
1841 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1842 else
1843 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1844 if (!(insn & (1 << 20)))
1845 gen_op_iwmmxt_addl_M0_wRn(wrd);
1846 gen_op_iwmmxt_movq_wRn_M0(wrd);
1847 gen_op_iwmmxt_set_mup();
1848 break;
1849 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1850 wrd = (insn >> 12) & 0xf;
1851 rd0 = (insn >> 16) & 0xf;
1852 rd1 = (insn >> 0) & 0xf;
1853 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1854 if (insn & (1 << 21)) {
1855 if (insn & (1 << 20))
1856 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1857 else
1858 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1859 } else {
1860 if (insn & (1 << 20))
1861 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1862 else
1863 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1864 }
18c9b560
AZ
1865 gen_op_iwmmxt_movq_wRn_M0(wrd);
1866 gen_op_iwmmxt_set_mup();
1867 break;
1868 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1869 wrd = (insn >> 12) & 0xf;
1870 rd0 = (insn >> 16) & 0xf;
1871 rd1 = (insn >> 0) & 0xf;
1872 gen_op_iwmmxt_movq_M0_wRn(rd0);
1873 if (insn & (1 << 21))
1874 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1875 else
1876 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1877 if (!(insn & (1 << 20))) {
e677137d
PB
1878 iwmmxt_load_reg(cpu_V1, wrd);
1879 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1880 }
1881 gen_op_iwmmxt_movq_wRn_M0(wrd);
1882 gen_op_iwmmxt_set_mup();
1883 break;
1884 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1885 wrd = (insn >> 12) & 0xf;
1886 rd0 = (insn >> 16) & 0xf;
1887 rd1 = (insn >> 0) & 0xf;
1888 gen_op_iwmmxt_movq_M0_wRn(rd0);
1889 switch ((insn >> 22) & 3) {
1890 case 0:
1891 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1892 break;
1893 case 1:
1894 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1895 break;
1896 case 2:
1897 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1898 break;
1899 case 3:
1900 return 1;
1901 }
1902 gen_op_iwmmxt_movq_wRn_M0(wrd);
1903 gen_op_iwmmxt_set_mup();
1904 gen_op_iwmmxt_set_cup();
1905 break;
1906 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1907 wrd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
1909 rd1 = (insn >> 0) & 0xf;
1910 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1911 if (insn & (1 << 22)) {
1912 if (insn & (1 << 20))
1913 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1914 else
1915 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1916 } else {
1917 if (insn & (1 << 20))
1918 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1919 else
1920 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1921 }
18c9b560
AZ
1922 gen_op_iwmmxt_movq_wRn_M0(wrd);
1923 gen_op_iwmmxt_set_mup();
1924 gen_op_iwmmxt_set_cup();
1925 break;
1926 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1927 wrd = (insn >> 12) & 0xf;
1928 rd0 = (insn >> 16) & 0xf;
1929 rd1 = (insn >> 0) & 0xf;
1930 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1931 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1932 tcg_gen_andi_i32(tmp, tmp, 7);
1933 iwmmxt_load_reg(cpu_V1, rd1);
1934 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1935 tcg_temp_free_i32(tmp);
18c9b560
AZ
1936 gen_op_iwmmxt_movq_wRn_M0(wrd);
1937 gen_op_iwmmxt_set_mup();
1938 break;
1939 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1940 if (((insn >> 6) & 3) == 3)
1941 return 1;
18c9b560
AZ
1942 rd = (insn >> 12) & 0xf;
1943 wrd = (insn >> 16) & 0xf;
da6b5335 1944 tmp = load_reg(s, rd);
18c9b560
AZ
1945 gen_op_iwmmxt_movq_M0_wRn(wrd);
1946 switch ((insn >> 6) & 3) {
1947 case 0:
da6b5335
FN
1948 tmp2 = tcg_const_i32(0xff);
1949 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1950 break;
1951 case 1:
da6b5335
FN
1952 tmp2 = tcg_const_i32(0xffff);
1953 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1954 break;
1955 case 2:
da6b5335
FN
1956 tmp2 = tcg_const_i32(0xffffffff);
1957 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1958 break;
da6b5335 1959 default:
39d5492a
PM
1960 TCGV_UNUSED_I32(tmp2);
1961 TCGV_UNUSED_I32(tmp3);
18c9b560 1962 }
da6b5335 1963 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1964 tcg_temp_free_i32(tmp3);
1965 tcg_temp_free_i32(tmp2);
7d1b0095 1966 tcg_temp_free_i32(tmp);
18c9b560
AZ
1967 gen_op_iwmmxt_movq_wRn_M0(wrd);
1968 gen_op_iwmmxt_set_mup();
1969 break;
1970 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1971 rd = (insn >> 12) & 0xf;
1972 wrd = (insn >> 16) & 0xf;
da6b5335 1973 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1974 return 1;
1975 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1976 tmp = tcg_temp_new_i32();
18c9b560
AZ
1977 switch ((insn >> 22) & 3) {
1978 case 0:
da6b5335 1979 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 1980 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
1981 if (insn & 8) {
1982 tcg_gen_ext8s_i32(tmp, tmp);
1983 } else {
1984 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1985 }
1986 break;
1987 case 1:
da6b5335 1988 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 1989 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
1990 if (insn & 8) {
1991 tcg_gen_ext16s_i32(tmp, tmp);
1992 } else {
1993 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1994 }
1995 break;
1996 case 2:
da6b5335 1997 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 1998 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 1999 break;
18c9b560 2000 }
da6b5335 2001 store_reg(s, rd, tmp);
18c9b560
AZ
2002 break;
2003 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2004 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2005 return 1;
da6b5335 2006 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2007 switch ((insn >> 22) & 3) {
2008 case 0:
da6b5335 2009 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2010 break;
2011 case 1:
da6b5335 2012 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2013 break;
2014 case 2:
da6b5335 2015 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2016 break;
18c9b560 2017 }
da6b5335
FN
2018 tcg_gen_shli_i32(tmp, tmp, 28);
2019 gen_set_nzcv(tmp);
7d1b0095 2020 tcg_temp_free_i32(tmp);
18c9b560
AZ
2021 break;
2022 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2023 if (((insn >> 6) & 3) == 3)
2024 return 1;
18c9b560
AZ
2025 rd = (insn >> 12) & 0xf;
2026 wrd = (insn >> 16) & 0xf;
da6b5335 2027 tmp = load_reg(s, rd);
18c9b560
AZ
2028 switch ((insn >> 6) & 3) {
2029 case 0:
da6b5335 2030 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2031 break;
2032 case 1:
da6b5335 2033 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2034 break;
2035 case 2:
da6b5335 2036 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2037 break;
18c9b560 2038 }
7d1b0095 2039 tcg_temp_free_i32(tmp);
18c9b560
AZ
2040 gen_op_iwmmxt_movq_wRn_M0(wrd);
2041 gen_op_iwmmxt_set_mup();
2042 break;
2043 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2044 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2045 return 1;
da6b5335 2046 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2047 tmp2 = tcg_temp_new_i32();
da6b5335 2048 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2049 switch ((insn >> 22) & 3) {
2050 case 0:
2051 for (i = 0; i < 7; i ++) {
da6b5335
FN
2052 tcg_gen_shli_i32(tmp2, tmp2, 4);
2053 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2054 }
2055 break;
2056 case 1:
2057 for (i = 0; i < 3; i ++) {
da6b5335
FN
2058 tcg_gen_shli_i32(tmp2, tmp2, 8);
2059 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2060 }
2061 break;
2062 case 2:
da6b5335
FN
2063 tcg_gen_shli_i32(tmp2, tmp2, 16);
2064 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2065 break;
18c9b560 2066 }
da6b5335 2067 gen_set_nzcv(tmp);
7d1b0095
PM
2068 tcg_temp_free_i32(tmp2);
2069 tcg_temp_free_i32(tmp);
18c9b560
AZ
2070 break;
2071 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2072 wrd = (insn >> 12) & 0xf;
2073 rd0 = (insn >> 16) & 0xf;
2074 gen_op_iwmmxt_movq_M0_wRn(rd0);
2075 switch ((insn >> 22) & 3) {
2076 case 0:
e677137d 2077 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2078 break;
2079 case 1:
e677137d 2080 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2081 break;
2082 case 2:
e677137d 2083 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2084 break;
2085 case 3:
2086 return 1;
2087 }
2088 gen_op_iwmmxt_movq_wRn_M0(wrd);
2089 gen_op_iwmmxt_set_mup();
2090 break;
2091 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2092 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2093 return 1;
da6b5335 2094 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2095 tmp2 = tcg_temp_new_i32();
da6b5335 2096 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2097 switch ((insn >> 22) & 3) {
2098 case 0:
2099 for (i = 0; i < 7; i ++) {
da6b5335
FN
2100 tcg_gen_shli_i32(tmp2, tmp2, 4);
2101 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2102 }
2103 break;
2104 case 1:
2105 for (i = 0; i < 3; i ++) {
da6b5335
FN
2106 tcg_gen_shli_i32(tmp2, tmp2, 8);
2107 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2108 }
2109 break;
2110 case 2:
da6b5335
FN
2111 tcg_gen_shli_i32(tmp2, tmp2, 16);
2112 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2113 break;
18c9b560 2114 }
da6b5335 2115 gen_set_nzcv(tmp);
7d1b0095
PM
2116 tcg_temp_free_i32(tmp2);
2117 tcg_temp_free_i32(tmp);
18c9b560
AZ
2118 break;
2119 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2120 rd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
da6b5335 2122 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2123 return 1;
2124 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2125 tmp = tcg_temp_new_i32();
18c9b560
AZ
2126 switch ((insn >> 22) & 3) {
2127 case 0:
da6b5335 2128 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2129 break;
2130 case 1:
da6b5335 2131 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2132 break;
2133 case 2:
da6b5335 2134 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2135 break;
18c9b560 2136 }
da6b5335 2137 store_reg(s, rd, tmp);
18c9b560
AZ
2138 break;
2139 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2140 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2141 wrd = (insn >> 12) & 0xf;
2142 rd0 = (insn >> 16) & 0xf;
2143 rd1 = (insn >> 0) & 0xf;
2144 gen_op_iwmmxt_movq_M0_wRn(rd0);
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2149 else
2150 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2151 break;
2152 case 1:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2155 else
2156 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2157 break;
2158 case 2:
2159 if (insn & (1 << 21))
2160 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2161 else
2162 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2163 break;
2164 case 3:
2165 return 1;
2166 }
2167 gen_op_iwmmxt_movq_wRn_M0(wrd);
2168 gen_op_iwmmxt_set_mup();
2169 gen_op_iwmmxt_set_cup();
2170 break;
2171 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2172 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 if (insn & (1 << 21))
2179 gen_op_iwmmxt_unpacklsb_M0();
2180 else
2181 gen_op_iwmmxt_unpacklub_M0();
2182 break;
2183 case 1:
2184 if (insn & (1 << 21))
2185 gen_op_iwmmxt_unpacklsw_M0();
2186 else
2187 gen_op_iwmmxt_unpackluw_M0();
2188 break;
2189 case 2:
2190 if (insn & (1 << 21))
2191 gen_op_iwmmxt_unpacklsl_M0();
2192 else
2193 gen_op_iwmmxt_unpacklul_M0();
2194 break;
2195 case 3:
2196 return 1;
2197 }
2198 gen_op_iwmmxt_movq_wRn_M0(wrd);
2199 gen_op_iwmmxt_set_mup();
2200 gen_op_iwmmxt_set_cup();
2201 break;
2202 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2203 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 gen_op_iwmmxt_movq_M0_wRn(rd0);
2207 switch ((insn >> 22) & 3) {
2208 case 0:
2209 if (insn & (1 << 21))
2210 gen_op_iwmmxt_unpackhsb_M0();
2211 else
2212 gen_op_iwmmxt_unpackhub_M0();
2213 break;
2214 case 1:
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_unpackhsw_M0();
2217 else
2218 gen_op_iwmmxt_unpackhuw_M0();
2219 break;
2220 case 2:
2221 if (insn & (1 << 21))
2222 gen_op_iwmmxt_unpackhsl_M0();
2223 else
2224 gen_op_iwmmxt_unpackhul_M0();
2225 break;
2226 case 3:
2227 return 1;
2228 }
2229 gen_op_iwmmxt_movq_wRn_M0(wrd);
2230 gen_op_iwmmxt_set_mup();
2231 gen_op_iwmmxt_set_cup();
2232 break;
2233 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2234 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2235 if (((insn >> 22) & 3) == 0)
2236 return 1;
18c9b560
AZ
2237 wrd = (insn >> 12) & 0xf;
2238 rd0 = (insn >> 16) & 0xf;
2239 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2240 tmp = tcg_temp_new_i32();
da6b5335 2241 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2242 tcg_temp_free_i32(tmp);
18c9b560 2243 return 1;
da6b5335 2244 }
18c9b560 2245 switch ((insn >> 22) & 3) {
18c9b560 2246 case 1:
477955bd 2247 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2248 break;
2249 case 2:
477955bd 2250 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2251 break;
2252 case 3:
477955bd 2253 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2254 break;
2255 }
7d1b0095 2256 tcg_temp_free_i32(tmp);
18c9b560
AZ
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2262 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2263 if (((insn >> 22) & 3) == 0)
2264 return 1;
18c9b560
AZ
2265 wrd = (insn >> 12) & 0xf;
2266 rd0 = (insn >> 16) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2268 tmp = tcg_temp_new_i32();
da6b5335 2269 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2270 tcg_temp_free_i32(tmp);
18c9b560 2271 return 1;
da6b5335 2272 }
18c9b560 2273 switch ((insn >> 22) & 3) {
18c9b560 2274 case 1:
477955bd 2275 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2276 break;
2277 case 2:
477955bd 2278 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2279 break;
2280 case 3:
477955bd 2281 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2282 break;
2283 }
7d1b0095 2284 tcg_temp_free_i32(tmp);
18c9b560
AZ
2285 gen_op_iwmmxt_movq_wRn_M0(wrd);
2286 gen_op_iwmmxt_set_mup();
2287 gen_op_iwmmxt_set_cup();
2288 break;
2289 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2290 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2291 if (((insn >> 22) & 3) == 0)
2292 return 1;
18c9b560
AZ
2293 wrd = (insn >> 12) & 0xf;
2294 rd0 = (insn >> 16) & 0xf;
2295 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2296 tmp = tcg_temp_new_i32();
da6b5335 2297 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2298 tcg_temp_free_i32(tmp);
18c9b560 2299 return 1;
da6b5335 2300 }
18c9b560 2301 switch ((insn >> 22) & 3) {
18c9b560 2302 case 1:
477955bd 2303 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2304 break;
2305 case 2:
477955bd 2306 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2307 break;
2308 case 3:
477955bd 2309 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2310 break;
2311 }
7d1b0095 2312 tcg_temp_free_i32(tmp);
18c9b560
AZ
2313 gen_op_iwmmxt_movq_wRn_M0(wrd);
2314 gen_op_iwmmxt_set_mup();
2315 gen_op_iwmmxt_set_cup();
2316 break;
2317 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2318 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2319 if (((insn >> 22) & 3) == 0)
2320 return 1;
18c9b560
AZ
2321 wrd = (insn >> 12) & 0xf;
2322 rd0 = (insn >> 16) & 0xf;
2323 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2324 tmp = tcg_temp_new_i32();
18c9b560 2325 switch ((insn >> 22) & 3) {
18c9b560 2326 case 1:
da6b5335 2327 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2328 tcg_temp_free_i32(tmp);
18c9b560 2329 return 1;
da6b5335 2330 }
477955bd 2331 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2332 break;
2333 case 2:
da6b5335 2334 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2335 tcg_temp_free_i32(tmp);
18c9b560 2336 return 1;
da6b5335 2337 }
477955bd 2338 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2339 break;
2340 case 3:
da6b5335 2341 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2342 tcg_temp_free_i32(tmp);
18c9b560 2343 return 1;
da6b5335 2344 }
477955bd 2345 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2346 break;
2347 }
7d1b0095 2348 tcg_temp_free_i32(tmp);
18c9b560
AZ
2349 gen_op_iwmmxt_movq_wRn_M0(wrd);
2350 gen_op_iwmmxt_set_mup();
2351 gen_op_iwmmxt_set_cup();
2352 break;
2353 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2354 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 rd1 = (insn >> 0) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 switch ((insn >> 22) & 3) {
2360 case 0:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_minub_M0_wRn(rd1);
2365 break;
2366 case 1:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2371 break;
2372 case 2:
2373 if (insn & (1 << 21))
2374 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2375 else
2376 gen_op_iwmmxt_minul_M0_wRn(rd1);
2377 break;
2378 case 3:
2379 return 1;
2380 }
2381 gen_op_iwmmxt_movq_wRn_M0(wrd);
2382 gen_op_iwmmxt_set_mup();
2383 break;
2384 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2385 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2386 wrd = (insn >> 12) & 0xf;
2387 rd0 = (insn >> 16) & 0xf;
2388 rd1 = (insn >> 0) & 0xf;
2389 gen_op_iwmmxt_movq_M0_wRn(rd0);
2390 switch ((insn >> 22) & 3) {
2391 case 0:
2392 if (insn & (1 << 21))
2393 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2394 else
2395 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2396 break;
2397 case 1:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2402 break;
2403 case 2:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2408 break;
2409 case 3:
2410 return 1;
2411 }
2412 gen_op_iwmmxt_movq_wRn_M0(wrd);
2413 gen_op_iwmmxt_set_mup();
2414 break;
2415 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2416 case 0x402: case 0x502: case 0x602: case 0x702:
2417 wrd = (insn >> 12) & 0xf;
2418 rd0 = (insn >> 16) & 0xf;
2419 rd1 = (insn >> 0) & 0xf;
2420 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2421 tmp = tcg_const_i32((insn >> 20) & 3);
2422 iwmmxt_load_reg(cpu_V1, rd1);
2423 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2424 tcg_temp_free_i32(tmp);
18c9b560
AZ
2425 gen_op_iwmmxt_movq_wRn_M0(wrd);
2426 gen_op_iwmmxt_set_mup();
2427 break;
2428 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2429 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2430 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2431 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2432 wrd = (insn >> 12) & 0xf;
2433 rd0 = (insn >> 16) & 0xf;
2434 rd1 = (insn >> 0) & 0xf;
2435 gen_op_iwmmxt_movq_M0_wRn(rd0);
2436 switch ((insn >> 20) & 0xf) {
2437 case 0x0:
2438 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2439 break;
2440 case 0x1:
2441 gen_op_iwmmxt_subub_M0_wRn(rd1);
2442 break;
2443 case 0x3:
2444 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2445 break;
2446 case 0x4:
2447 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2448 break;
2449 case 0x5:
2450 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2451 break;
2452 case 0x7:
2453 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2454 break;
2455 case 0x8:
2456 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2457 break;
2458 case 0x9:
2459 gen_op_iwmmxt_subul_M0_wRn(rd1);
2460 break;
2461 case 0xb:
2462 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2463 break;
2464 default:
2465 return 1;
2466 }
2467 gen_op_iwmmxt_movq_wRn_M0(wrd);
2468 gen_op_iwmmxt_set_mup();
2469 gen_op_iwmmxt_set_cup();
2470 break;
2471 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2472 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2473 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2474 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2475 wrd = (insn >> 12) & 0xf;
2476 rd0 = (insn >> 16) & 0xf;
2477 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2478 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2479 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2480 tcg_temp_free_i32(tmp);
18c9b560
AZ
2481 gen_op_iwmmxt_movq_wRn_M0(wrd);
2482 gen_op_iwmmxt_set_mup();
2483 gen_op_iwmmxt_set_cup();
2484 break;
2485 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2486 case 0x418: case 0x518: case 0x618: case 0x718:
2487 case 0x818: case 0x918: case 0xa18: case 0xb18:
2488 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2489 wrd = (insn >> 12) & 0xf;
2490 rd0 = (insn >> 16) & 0xf;
2491 rd1 = (insn >> 0) & 0xf;
2492 gen_op_iwmmxt_movq_M0_wRn(rd0);
2493 switch ((insn >> 20) & 0xf) {
2494 case 0x0:
2495 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2496 break;
2497 case 0x1:
2498 gen_op_iwmmxt_addub_M0_wRn(rd1);
2499 break;
2500 case 0x3:
2501 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2502 break;
2503 case 0x4:
2504 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2505 break;
2506 case 0x5:
2507 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2508 break;
2509 case 0x7:
2510 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2511 break;
2512 case 0x8:
2513 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2514 break;
2515 case 0x9:
2516 gen_op_iwmmxt_addul_M0_wRn(rd1);
2517 break;
2518 case 0xb:
2519 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2520 break;
2521 default:
2522 return 1;
2523 }
2524 gen_op_iwmmxt_movq_wRn_M0(wrd);
2525 gen_op_iwmmxt_set_mup();
2526 gen_op_iwmmxt_set_cup();
2527 break;
2528 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2529 case 0x408: case 0x508: case 0x608: case 0x708:
2530 case 0x808: case 0x908: case 0xa08: case 0xb08:
2531 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2532 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2533 return 1;
18c9b560
AZ
2534 wrd = (insn >> 12) & 0xf;
2535 rd0 = (insn >> 16) & 0xf;
2536 rd1 = (insn >> 0) & 0xf;
2537 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2538 switch ((insn >> 22) & 3) {
18c9b560
AZ
2539 case 1:
2540 if (insn & (1 << 21))
2541 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2542 else
2543 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2544 break;
2545 case 2:
2546 if (insn & (1 << 21))
2547 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2548 else
2549 gen_op_iwmmxt_packul_M0_wRn(rd1);
2550 break;
2551 case 3:
2552 if (insn & (1 << 21))
2553 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2554 else
2555 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2556 break;
2557 }
2558 gen_op_iwmmxt_movq_wRn_M0(wrd);
2559 gen_op_iwmmxt_set_mup();
2560 gen_op_iwmmxt_set_cup();
2561 break;
2562 case 0x201: case 0x203: case 0x205: case 0x207:
2563 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2564 case 0x211: case 0x213: case 0x215: case 0x217:
2565 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2566 wrd = (insn >> 5) & 0xf;
2567 rd0 = (insn >> 12) & 0xf;
2568 rd1 = (insn >> 0) & 0xf;
2569 if (rd0 == 0xf || rd1 == 0xf)
2570 return 1;
2571 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2572 tmp = load_reg(s, rd0);
2573 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2574 switch ((insn >> 16) & 0xf) {
2575 case 0x0: /* TMIA */
da6b5335 2576 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2577 break;
2578 case 0x8: /* TMIAPH */
da6b5335 2579 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2580 break;
2581 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2582 if (insn & (1 << 16))
da6b5335 2583 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2584 if (insn & (1 << 17))
da6b5335
FN
2585 tcg_gen_shri_i32(tmp2, tmp2, 16);
2586 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2587 break;
2588 default:
7d1b0095
PM
2589 tcg_temp_free_i32(tmp2);
2590 tcg_temp_free_i32(tmp);
18c9b560
AZ
2591 return 1;
2592 }
7d1b0095
PM
2593 tcg_temp_free_i32(tmp2);
2594 tcg_temp_free_i32(tmp);
18c9b560
AZ
2595 gen_op_iwmmxt_movq_wRn_M0(wrd);
2596 gen_op_iwmmxt_set_mup();
2597 break;
2598 default:
2599 return 1;
2600 }
2601
2602 return 0;
2603}
2604
a1c7273b 2605/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2606 (ie. an undefined instruction). */
7dcc1f89 2607static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2608{
2609 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2610 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2611
2612 if ((insn & 0x0ff00f10) == 0x0e200010) {
2613 /* Multiply with Internal Accumulate Format */
2614 rd0 = (insn >> 12) & 0xf;
2615 rd1 = insn & 0xf;
2616 acc = (insn >> 5) & 7;
2617
2618 if (acc != 0)
2619 return 1;
2620
3a554c0f
FN
2621 tmp = load_reg(s, rd0);
2622 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2623 switch ((insn >> 16) & 0xf) {
2624 case 0x0: /* MIA */
3a554c0f 2625 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2626 break;
2627 case 0x8: /* MIAPH */
3a554c0f 2628 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2629 break;
2630 case 0xc: /* MIABB */
2631 case 0xd: /* MIABT */
2632 case 0xe: /* MIATB */
2633 case 0xf: /* MIATT */
18c9b560 2634 if (insn & (1 << 16))
3a554c0f 2635 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2636 if (insn & (1 << 17))
3a554c0f
FN
2637 tcg_gen_shri_i32(tmp2, tmp2, 16);
2638 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2639 break;
2640 default:
2641 return 1;
2642 }
7d1b0095
PM
2643 tcg_temp_free_i32(tmp2);
2644 tcg_temp_free_i32(tmp);
18c9b560
AZ
2645
2646 gen_op_iwmmxt_movq_wRn_M0(acc);
2647 return 0;
2648 }
2649
2650 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2651 /* Internal Accumulator Access Format */
2652 rdhi = (insn >> 16) & 0xf;
2653 rdlo = (insn >> 12) & 0xf;
2654 acc = insn & 7;
2655
2656 if (acc != 0)
2657 return 1;
2658
2659 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2660 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2661 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2662 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2663 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2664 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2665 } else { /* MAR */
3a554c0f
FN
2666 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2667 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2668 }
2669 return 0;
2670 }
2671
2672 return 1;
2673}
2674
9ee6e8bb
PB
/* Shift right by n bits, treating a negative n as a left shift. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number from an instruction:
 * four bits at 'bigbit' form the high part and the bit at 'smallbit'
 * supplies the low bit.
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into 'reg'.  On VFP3 and
 * later the bit at 'smallbit' extends the range to 32 D registers;
 * on earlier cores a set 'smallbit' is an undefined encoding.
 * NOTE: in that case this macro executes 'return 1' from the enclosing
 * function, and it implicitly uses the local DisasContext 's'.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* The D/N/M operand fields at their standard A32 VFP bit positions. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2694
4373f3ce 2695/* Move between integer and VFP cores. */
39d5492a 2696static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2697{
39d5492a 2698 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2699 tcg_gen_mov_i32(tmp, cpu_F0s);
2700 return tmp;
2701}
2702
39d5492a 2703static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2704{
2705 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2706 tcg_temp_free_i32(tmp);
4373f3ce
PB
2707}
2708
39d5492a 2709static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2710{
39d5492a 2711 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2712 if (shift)
2713 tcg_gen_shri_i32(var, var, shift);
86831435 2714 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2715 tcg_gen_shli_i32(tmp, var, 8);
2716 tcg_gen_or_i32(var, var, tmp);
2717 tcg_gen_shli_i32(tmp, var, 16);
2718 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2719 tcg_temp_free_i32(tmp);
ad69471c
PB
2720}
2721
39d5492a 2722static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2723{
39d5492a 2724 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2725 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2726 tcg_gen_shli_i32(tmp, var, 16);
2727 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2728 tcg_temp_free_i32(tmp);
ad69471c
PB
2729}
2730
39d5492a 2731static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2732{
39d5492a 2733 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2734 tcg_gen_andi_i32(var, var, 0xffff0000);
2735 tcg_gen_shri_i32(tmp, var, 16);
2736 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2737 tcg_temp_free_i32(tmp);
ad69471c
PB
2738}
2739
39d5492a 2740static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2741{
2742 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2743 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2744 switch (size) {
2745 case 0:
6ce2faf4 2746 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2747 gen_neon_dup_u8(tmp, 0);
2748 break;
2749 case 1:
6ce2faf4 2750 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2751 gen_neon_dup_low16(tmp);
2752 break;
2753 case 2:
6ce2faf4 2754 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2755 break;
2756 default: /* Avoid compiler warnings. */
2757 abort();
2758 }
2759 return tmp;
2760}
2761
04731fb5
WN
/* Handle a v8 VSEL instruction: conditionally select between the Vn and Vm
 * source registers based on the ARM condition flags, without branching.
 * 'cc' (insn bits [21:20]) encodes eq/vs/ge/gt; rd/rn/rm are already
 * decoded register numbers and 'dp' selects double precision.
 * Returns 0 on success (never fails).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the flags to 64 bits so movcond_i64 can compare them.
         * NF/VF are sign-extended so the LT/GE conditions below test
         * their sign bits; ZF only needs zero/non-zero.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two chained movconds: first apply !Z, then refine with
             * the N == V test (falling back to frm if it fails).
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: same logic, but the 32-bit flag variables
         * can be compared directly without widening.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2870
40cfacdd
WN
2871static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2872 uint32_t rm, uint32_t dp)
2873{
2874 uint32_t vmin = extract32(insn, 6, 1);
2875 TCGv_ptr fpst = get_fpstatus_ptr(0);
2876
2877 if (dp) {
2878 TCGv_i64 frn, frm, dest;
2879
2880 frn = tcg_temp_new_i64();
2881 frm = tcg_temp_new_i64();
2882 dest = tcg_temp_new_i64();
2883
2884 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2885 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2886 if (vmin) {
f71a2ae5 2887 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2888 } else {
f71a2ae5 2889 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2890 }
2891 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2892 tcg_temp_free_i64(frn);
2893 tcg_temp_free_i64(frm);
2894 tcg_temp_free_i64(dest);
2895 } else {
2896 TCGv_i32 frn, frm, dest;
2897
2898 frn = tcg_temp_new_i32();
2899 frm = tcg_temp_new_i32();
2900 dest = tcg_temp_new_i32();
2901
2902 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2903 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2904 if (vmin) {
f71a2ae5 2905 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2906 } else {
f71a2ae5 2907 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2908 }
2909 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2910 tcg_temp_free_i32(frn);
2911 tcg_temp_free_i32(frm);
2912 tcg_temp_free_i32(dest);
2913 }
2914
2915 tcg_temp_free_ptr(fpst);
2916 return 0;
2917}
2918
7655f39b
WN
2919static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2920 int rounding)
2921{
2922 TCGv_ptr fpst = get_fpstatus_ptr(0);
2923 TCGv_i32 tcg_rmode;
2924
2925 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2926 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2927
2928 if (dp) {
2929 TCGv_i64 tcg_op;
2930 TCGv_i64 tcg_res;
2931 tcg_op = tcg_temp_new_i64();
2932 tcg_res = tcg_temp_new_i64();
2933 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2934 gen_helper_rintd(tcg_res, tcg_op, fpst);
2935 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2936 tcg_temp_free_i64(tcg_op);
2937 tcg_temp_free_i64(tcg_res);
2938 } else {
2939 TCGv_i32 tcg_op;
2940 TCGv_i32 tcg_res;
2941 tcg_op = tcg_temp_new_i32();
2942 tcg_res = tcg_temp_new_i32();
2943 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2944 gen_helper_rints(tcg_res, tcg_op, fpst);
2945 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2946 tcg_temp_free_i32(tcg_op);
2947 tcg_temp_free_i32(tcg_res);
2948 }
2949
2950 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2951 tcg_temp_free_i32(tcg_rmode);
2952
2953 tcg_temp_free_ptr(fpst);
2954 return 0;
2955}
2956
c9975a83
WN
/* Handle v8 VCVT{A,N,P,M}: float-to-integer conversion with an explicitly
 * selected rounding mode.  Bit 7 of the instruction chooses a signed
 * (set) or unsigned (clear) result.  The integer result is always written
 * to a single-precision destination register.  Returns 0 (never fails).
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* Zero fixed-point shift: this is a plain float->int conversion. */
    tcg_shift = tcg_const_i32(0);

    /* Swap in the requested rounding mode; the matching call at the end
     * of the function (with the same temp) swaps the old mode back.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* Narrow the 64-bit helper result to the 32 bits we store. */
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the previous rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
3014
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,   /* RM field 0b00 */
    FPROUNDING_TIEEVEN,   /* RM field 0b01 */
    FPROUNDING_POSINF,    /* RM field 0b10 */
    FPROUNDING_NEGINF,    /* RM field 0b11 */
};
3025
7dcc1f89 3026static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3027{
3028 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3029
d614a513 3030 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3031 return 1;
3032 }
3033
3034 if (dp) {
3035 VFP_DREG_D(rd, insn);
3036 VFP_DREG_N(rn, insn);
3037 VFP_DREG_M(rm, insn);
3038 } else {
3039 rd = VFP_SREG_D(insn);
3040 rn = VFP_SREG_N(insn);
3041 rm = VFP_SREG_M(insn);
3042 }
3043
3044 if ((insn & 0x0f800e50) == 0x0e000a00) {
3045 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3046 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3047 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3048 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3049 /* VRINTA, VRINTN, VRINTP, VRINTM */
3050 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3051 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3052 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3053 /* VCVTA, VCVTN, VCVTP, VCVTM */
3054 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3055 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3056 }
3057 return 1;
3058}
3059
a1c7273b 3060/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3061 (ie. an undefined instruction). */
7dcc1f89 3062static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3063{
3064 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3065 int dp, veclen;
39d5492a
PM
3066 TCGv_i32 addr;
3067 TCGv_i32 tmp;
3068 TCGv_i32 tmp2;
b7bcbe95 3069
d614a513 3070 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3071 return 1;
d614a513 3072 }
40f137e1 3073
2c7ffc41
PM
3074 /* FIXME: this access check should not take precedence over UNDEF
3075 * for invalid encodings; we will generate incorrect syndrome information
3076 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3077 */
9dbbc748 3078 if (s->fp_excp_el) {
2c7ffc41 3079 gen_exception_insn(s, 4, EXCP_UDEF,
9dbbc748 3080 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
2c7ffc41
PM
3081 return 0;
3082 }
3083
5df8bac1 3084 if (!s->vfp_enabled) {
9ee6e8bb 3085 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3086 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3087 return 1;
3088 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3089 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3090 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3091 return 1;
a50c0f51 3092 }
40f137e1 3093 }
6a57f3eb
WN
3094
3095 if (extract32(insn, 28, 4) == 0xf) {
3096 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3097 * only used in v8 and above.
3098 */
7dcc1f89 3099 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3100 }
3101
b7bcbe95
FB
3102 dp = ((insn & 0xf00) == 0xb00);
3103 switch ((insn >> 24) & 0xf) {
3104 case 0xe:
3105 if (insn & (1 << 4)) {
3106 /* single register transfer */
b7bcbe95
FB
3107 rd = (insn >> 12) & 0xf;
3108 if (dp) {
9ee6e8bb
PB
3109 int size;
3110 int pass;
3111
3112 VFP_DREG_N(rn, insn);
3113 if (insn & 0xf)
b7bcbe95 3114 return 1;
9ee6e8bb 3115 if (insn & 0x00c00060
d614a513 3116 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3117 return 1;
d614a513 3118 }
9ee6e8bb
PB
3119
3120 pass = (insn >> 21) & 1;
3121 if (insn & (1 << 22)) {
3122 size = 0;
3123 offset = ((insn >> 5) & 3) * 8;
3124 } else if (insn & (1 << 5)) {
3125 size = 1;
3126 offset = (insn & (1 << 6)) ? 16 : 0;
3127 } else {
3128 size = 2;
3129 offset = 0;
3130 }
18c9b560 3131 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3132 /* vfp->arm */
ad69471c 3133 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3134 switch (size) {
3135 case 0:
9ee6e8bb 3136 if (offset)
ad69471c 3137 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3138 if (insn & (1 << 23))
ad69471c 3139 gen_uxtb(tmp);
9ee6e8bb 3140 else
ad69471c 3141 gen_sxtb(tmp);
9ee6e8bb
PB
3142 break;
3143 case 1:
9ee6e8bb
PB
3144 if (insn & (1 << 23)) {
3145 if (offset) {
ad69471c 3146 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3147 } else {
ad69471c 3148 gen_uxth(tmp);
9ee6e8bb
PB
3149 }
3150 } else {
3151 if (offset) {
ad69471c 3152 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3153 } else {
ad69471c 3154 gen_sxth(tmp);
9ee6e8bb
PB
3155 }
3156 }
3157 break;
3158 case 2:
9ee6e8bb
PB
3159 break;
3160 }
ad69471c 3161 store_reg(s, rd, tmp);
b7bcbe95
FB
3162 } else {
3163 /* arm->vfp */
ad69471c 3164 tmp = load_reg(s, rd);
9ee6e8bb
PB
3165 if (insn & (1 << 23)) {
3166 /* VDUP */
3167 if (size == 0) {
ad69471c 3168 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3169 } else if (size == 1) {
ad69471c 3170 gen_neon_dup_low16(tmp);
9ee6e8bb 3171 }
cbbccffc 3172 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3173 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3174 tcg_gen_mov_i32(tmp2, tmp);
3175 neon_store_reg(rn, n, tmp2);
3176 }
3177 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3178 } else {
3179 /* VMOV */
3180 switch (size) {
3181 case 0:
ad69471c 3182 tmp2 = neon_load_reg(rn, pass);
d593c48e 3183 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3184 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3185 break;
3186 case 1:
ad69471c 3187 tmp2 = neon_load_reg(rn, pass);
d593c48e 3188 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3189 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3190 break;
3191 case 2:
9ee6e8bb
PB
3192 break;
3193 }
ad69471c 3194 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3195 }
b7bcbe95 3196 }
9ee6e8bb
PB
3197 } else { /* !dp */
3198 if ((insn & 0x6f) != 0x00)
3199 return 1;
3200 rn = VFP_SREG_N(insn);
18c9b560 3201 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3202 /* vfp->arm */
3203 if (insn & (1 << 21)) {
3204 /* system register */
40f137e1 3205 rn >>= 1;
9ee6e8bb 3206
b7bcbe95 3207 switch (rn) {
40f137e1 3208 case ARM_VFP_FPSID:
4373f3ce 3209 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3210 VFP3 restricts all id registers to privileged
3211 accesses. */
3212 if (IS_USER(s)
d614a513 3213 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3214 return 1;
d614a513 3215 }
4373f3ce 3216 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3217 break;
40f137e1 3218 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3219 if (IS_USER(s))
3220 return 1;
4373f3ce 3221 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3222 break;
40f137e1
PB
3223 case ARM_VFP_FPINST:
3224 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3225 /* Not present in VFP3. */
3226 if (IS_USER(s)
d614a513 3227 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3228 return 1;
d614a513 3229 }
4373f3ce 3230 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3231 break;
40f137e1 3232 case ARM_VFP_FPSCR:
601d70b9 3233 if (rd == 15) {
4373f3ce
PB
3234 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3235 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3236 } else {
7d1b0095 3237 tmp = tcg_temp_new_i32();
4373f3ce
PB
3238 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3239 }
b7bcbe95 3240 break;
a50c0f51 3241 case ARM_VFP_MVFR2:
d614a513 3242 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3243 return 1;
3244 }
3245 /* fall through */
9ee6e8bb
PB
3246 case ARM_VFP_MVFR0:
3247 case ARM_VFP_MVFR1:
3248 if (IS_USER(s)
d614a513 3249 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3250 return 1;
d614a513 3251 }
4373f3ce 3252 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3253 break;
b7bcbe95
FB
3254 default:
3255 return 1;
3256 }
3257 } else {
3258 gen_mov_F0_vreg(0, rn);
4373f3ce 3259 tmp = gen_vfp_mrs();
b7bcbe95
FB
3260 }
3261 if (rd == 15) {
b5ff1b31 3262 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3263 gen_set_nzcv(tmp);
7d1b0095 3264 tcg_temp_free_i32(tmp);
4373f3ce
PB
3265 } else {
3266 store_reg(s, rd, tmp);
3267 }
b7bcbe95
FB
3268 } else {
3269 /* arm->vfp */
b7bcbe95 3270 if (insn & (1 << 21)) {
40f137e1 3271 rn >>= 1;
b7bcbe95
FB
3272 /* system register */
3273 switch (rn) {
40f137e1 3274 case ARM_VFP_FPSID:
9ee6e8bb
PB
3275 case ARM_VFP_MVFR0:
3276 case ARM_VFP_MVFR1:
b7bcbe95
FB
3277 /* Writes are ignored. */
3278 break;
40f137e1 3279 case ARM_VFP_FPSCR:
e4c1cfa5 3280 tmp = load_reg(s, rd);
4373f3ce 3281 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3282 tcg_temp_free_i32(tmp);
b5ff1b31 3283 gen_lookup_tb(s);
b7bcbe95 3284 break;
40f137e1 3285 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3286 if (IS_USER(s))
3287 return 1;
71b3c3de
JR
3288 /* TODO: VFP subarchitecture support.
3289 * For now, keep the EN bit only */
e4c1cfa5 3290 tmp = load_reg(s, rd);
71b3c3de 3291 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3292 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3293 gen_lookup_tb(s);
3294 break;
3295 case ARM_VFP_FPINST:
3296 case ARM_VFP_FPINST2:
23adb861
PM
3297 if (IS_USER(s)) {
3298 return 1;
3299 }
e4c1cfa5 3300 tmp = load_reg(s, rd);
4373f3ce 3301 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3302 break;
b7bcbe95
FB
3303 default:
3304 return 1;
3305 }
3306 } else {
e4c1cfa5 3307 tmp = load_reg(s, rd);
4373f3ce 3308 gen_vfp_msr(tmp);
b7bcbe95
FB
3309 gen_mov_vreg_F0(0, rn);
3310 }
3311 }
3312 }
3313 } else {
3314 /* data processing */
3315 /* The opcode is in bits 23, 21, 20 and 6. */
3316 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3317 if (dp) {
3318 if (op == 15) {
3319 /* rn is opcode */
3320 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3321 } else {
3322 /* rn is register number */
9ee6e8bb 3323 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3324 }
3325
239c20c7
WN
3326 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3327 ((rn & 0x1e) == 0x6))) {
3328 /* Integer or single/half precision destination. */
9ee6e8bb 3329 rd = VFP_SREG_D(insn);
b7bcbe95 3330 } else {
9ee6e8bb 3331 VFP_DREG_D(rd, insn);
b7bcbe95 3332 }
04595bf6 3333 if (op == 15 &&
239c20c7
WN
3334 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3335 ((rn & 0x1e) == 0x4))) {
3336 /* VCVT from int or half precision is always from S reg
3337 * regardless of dp bit. VCVT with immediate frac_bits
3338 * has same format as SREG_M.
04595bf6
PM
3339 */
3340 rm = VFP_SREG_M(insn);
b7bcbe95 3341 } else {
9ee6e8bb 3342 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3343 }
3344 } else {
9ee6e8bb 3345 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3346 if (op == 15 && rn == 15) {
3347 /* Double precision destination. */
9ee6e8bb
PB
3348 VFP_DREG_D(rd, insn);
3349 } else {
3350 rd = VFP_SREG_D(insn);
3351 }
04595bf6
PM
3352 /* NB that we implicitly rely on the encoding for the frac_bits
3353 * in VCVT of fixed to float being the same as that of an SREG_M
3354 */
9ee6e8bb 3355 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3356 }
3357
69d1fc22 3358 veclen = s->vec_len;
b7bcbe95
FB
3359 if (op == 15 && rn > 3)
3360 veclen = 0;
3361
3362 /* Shut up compiler warnings. */
3363 delta_m = 0;
3364 delta_d = 0;
3365 bank_mask = 0;
3b46e624 3366
b7bcbe95
FB
3367 if (veclen > 0) {
3368 if (dp)
3369 bank_mask = 0xc;
3370 else
3371 bank_mask = 0x18;
3372
3373 /* Figure out what type of vector operation this is. */
3374 if ((rd & bank_mask) == 0) {
3375 /* scalar */
3376 veclen = 0;
3377 } else {
3378 if (dp)
69d1fc22 3379 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3380 else
69d1fc22 3381 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3382
3383 if ((rm & bank_mask) == 0) {
3384 /* mixed scalar/vector */
3385 delta_m = 0;
3386 } else {
3387 /* vector */
3388 delta_m = delta_d;
3389 }
3390 }
3391 }
3392
3393 /* Load the initial operands. */
3394 if (op == 15) {
3395 switch (rn) {
3396 case 16:
3397 case 17:
3398 /* Integer source */
3399 gen_mov_F0_vreg(0, rm);
3400 break;
3401 case 8:
3402 case 9:
3403 /* Compare */
3404 gen_mov_F0_vreg(dp, rd);
3405 gen_mov_F1_vreg(dp, rm);
3406 break;
3407 case 10:
3408 case 11:
3409 /* Compare with zero */
3410 gen_mov_F0_vreg(dp, rd);
3411 gen_vfp_F1_ld0(dp);
3412 break;
9ee6e8bb
PB
3413 case 20:
3414 case 21:
3415 case 22:
3416 case 23:
644ad806
PB
3417 case 28:
3418 case 29:
3419 case 30:
3420 case 31:
9ee6e8bb
PB
3421 /* Source and destination the same. */
3422 gen_mov_F0_vreg(dp, rd);
3423 break;
6e0c0ed1
PM
3424 case 4:
3425 case 5:
3426 case 6:
3427 case 7:
239c20c7
WN
3428 /* VCVTB, VCVTT: only present with the halfprec extension
3429 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3430 * (we choose to UNDEF)
6e0c0ed1 3431 */
d614a513
PM
3432 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3433 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3434 return 1;
3435 }
239c20c7
WN
3436 if (!extract32(rn, 1, 1)) {
3437 /* Half precision source. */
3438 gen_mov_F0_vreg(0, rm);
3439 break;
3440 }
6e0c0ed1 3441 /* Otherwise fall through */
b7bcbe95
FB
3442 default:
3443 /* One source operand. */
3444 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3445 break;
b7bcbe95
FB
3446 }
3447 } else {
3448 /* Two source operands. */
3449 gen_mov_F0_vreg(dp, rn);
3450 gen_mov_F1_vreg(dp, rm);
3451 }
3452
3453 for (;;) {
3454 /* Perform the calculation. */
3455 switch (op) {
605a6aed
PM
3456 case 0: /* VMLA: fd + (fn * fm) */
3457 /* Note that order of inputs to the add matters for NaNs */
3458 gen_vfp_F1_mul(dp);
3459 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3460 gen_vfp_add(dp);
3461 break;
605a6aed 3462 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3463 gen_vfp_mul(dp);
605a6aed
PM
3464 gen_vfp_F1_neg(dp);
3465 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3466 gen_vfp_add(dp);
3467 break;
605a6aed
PM
3468 case 2: /* VNMLS: -fd + (fn * fm) */
3469 /* Note that it isn't valid to replace (-A + B) with (B - A)
3470 * or similar plausible looking simplifications
3471 * because this will give wrong results for NaNs.
3472 */
3473 gen_vfp_F1_mul(dp);
3474 gen_mov_F0_vreg(dp, rd);
3475 gen_vfp_neg(dp);
3476 gen_vfp_add(dp);
b7bcbe95 3477 break;
605a6aed 3478 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3479 gen_vfp_mul(dp);
605a6aed
PM
3480 gen_vfp_F1_neg(dp);
3481 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3482 gen_vfp_neg(dp);
605a6aed 3483 gen_vfp_add(dp);
b7bcbe95
FB
3484 break;
3485 case 4: /* mul: fn * fm */
3486 gen_vfp_mul(dp);
3487 break;
3488 case 5: /* nmul: -(fn * fm) */
3489 gen_vfp_mul(dp);
3490 gen_vfp_neg(dp);
3491 break;
3492 case 6: /* add: fn + fm */
3493 gen_vfp_add(dp);
3494 break;
3495 case 7: /* sub: fn - fm */
3496 gen_vfp_sub(dp);
3497 break;
3498 case 8: /* div: fn / fm */
3499 gen_vfp_div(dp);
3500 break;
da97f52c
PM
3501 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3502 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3503 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3504 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3505 /* These are fused multiply-add, and must be done as one
3506 * floating point operation with no rounding between the
3507 * multiplication and addition steps.
3508 * NB that doing the negations here as separate steps is
3509 * correct : an input NaN should come out with its sign bit
3510 * flipped if it is a negated-input.
3511 */
d614a513 3512 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3513 return 1;
3514 }
3515 if (dp) {
3516 TCGv_ptr fpst;
3517 TCGv_i64 frd;
3518 if (op & 1) {
3519 /* VFNMS, VFMS */
3520 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3521 }
3522 frd = tcg_temp_new_i64();
3523 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3524 if (op & 2) {
3525 /* VFNMA, VFNMS */
3526 gen_helper_vfp_negd(frd, frd);
3527 }
3528 fpst = get_fpstatus_ptr(0);
3529 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3530 cpu_F1d, frd, fpst);
3531 tcg_temp_free_ptr(fpst);
3532 tcg_temp_free_i64(frd);
3533 } else {
3534 TCGv_ptr fpst;
3535 TCGv_i32 frd;
3536 if (op & 1) {
3537 /* VFNMS, VFMS */
3538 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3539 }
3540 frd = tcg_temp_new_i32();
3541 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3542 if (op & 2) {
3543 gen_helper_vfp_negs(frd, frd);
3544 }
3545 fpst = get_fpstatus_ptr(0);
3546 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3547 cpu_F1s, frd, fpst);
3548 tcg_temp_free_ptr(fpst);
3549 tcg_temp_free_i32(frd);
3550 }
3551 break;
9ee6e8bb 3552 case 14: /* fconst */
d614a513
PM
3553 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3554 return 1;
3555 }
9ee6e8bb
PB
3556
3557 n = (insn << 12) & 0x80000000;
3558 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3559 if (dp) {
3560 if (i & 0x40)
3561 i |= 0x3f80;
3562 else
3563 i |= 0x4000;
3564 n |= i << 16;
4373f3ce 3565 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3566 } else {
3567 if (i & 0x40)
3568 i |= 0x780;
3569 else
3570 i |= 0x800;
3571 n |= i << 19;
5b340b51 3572 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3573 }
9ee6e8bb 3574 break;
b7bcbe95
FB
3575 case 15: /* extension space */
3576 switch (rn) {
3577 case 0: /* cpy */
3578 /* no-op */
3579 break;
3580 case 1: /* abs */
3581 gen_vfp_abs(dp);
3582 break;
3583 case 2: /* neg */
3584 gen_vfp_neg(dp);
3585 break;
3586 case 3: /* sqrt */
3587 gen_vfp_sqrt(dp);
3588 break;
239c20c7 3589 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3590 tmp = gen_vfp_mrs();
3591 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3592 if (dp) {
3593 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3594 cpu_env);
3595 } else {
3596 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3597 cpu_env);
3598 }
7d1b0095 3599 tcg_temp_free_i32(tmp);
60011498 3600 break;
239c20c7 3601 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3602 tmp = gen_vfp_mrs();
3603 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3604 if (dp) {
3605 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3606 cpu_env);
3607 } else {
3608 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3609 cpu_env);
3610 }
7d1b0095 3611 tcg_temp_free_i32(tmp);
60011498 3612 break;
239c20c7 3613 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3614 tmp = tcg_temp_new_i32();
239c20c7
WN
3615 if (dp) {
3616 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3617 cpu_env);
3618 } else {
3619 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3620 cpu_env);
3621 }
60011498
PB
3622 gen_mov_F0_vreg(0, rd);
3623 tmp2 = gen_vfp_mrs();
3624 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3625 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3626 tcg_temp_free_i32(tmp2);
60011498
PB
3627 gen_vfp_msr(tmp);
3628 break;
239c20c7 3629 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3630 tmp = tcg_temp_new_i32();
239c20c7
WN
3631 if (dp) {
3632 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3633 cpu_env);
3634 } else {
3635 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3636 cpu_env);
3637 }
60011498
PB
3638 tcg_gen_shli_i32(tmp, tmp, 16);
3639 gen_mov_F0_vreg(0, rd);
3640 tmp2 = gen_vfp_mrs();
3641 tcg_gen_ext16u_i32(tmp2, tmp2);
3642 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3643 tcg_temp_free_i32(tmp2);
60011498
PB
3644 gen_vfp_msr(tmp);
3645 break;
b7bcbe95
FB
3646 case 8: /* cmp */
3647 gen_vfp_cmp(dp);
3648 break;
3649 case 9: /* cmpe */
3650 gen_vfp_cmpe(dp);
3651 break;
3652 case 10: /* cmpz */
3653 gen_vfp_cmp(dp);
3654 break;
3655 case 11: /* cmpez */
3656 gen_vfp_F1_ld0(dp);
3657 gen_vfp_cmpe(dp);
3658 break;
664c6733
WN
3659 case 12: /* vrintr */
3660 {
3661 TCGv_ptr fpst = get_fpstatus_ptr(0);
3662 if (dp) {
3663 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3664 } else {
3665 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3666 }
3667 tcg_temp_free_ptr(fpst);
3668 break;
3669 }
a290c62a
WN
3670 case 13: /* vrintz */
3671 {
3672 TCGv_ptr fpst = get_fpstatus_ptr(0);
3673 TCGv_i32 tcg_rmode;
3674 tcg_rmode = tcg_const_i32(float_round_to_zero);
3675 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3676 if (dp) {
3677 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3678 } else {
3679 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3680 }
3681 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3682 tcg_temp_free_i32(tcg_rmode);
3683 tcg_temp_free_ptr(fpst);
3684 break;
3685 }
4e82bc01
WN
3686 case 14: /* vrintx */
3687 {
3688 TCGv_ptr fpst = get_fpstatus_ptr(0);
3689 if (dp) {
3690 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3691 } else {
3692 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3693 }
3694 tcg_temp_free_ptr(fpst);
3695 break;
3696 }
b7bcbe95
FB
3697 case 15: /* single<->double conversion */
3698 if (dp)
4373f3ce 3699 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3700 else
4373f3ce 3701 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3702 break;
3703 case 16: /* fuito */
5500b06c 3704 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3705 break;
3706 case 17: /* fsito */
5500b06c 3707 gen_vfp_sito(dp, 0);
b7bcbe95 3708 break;
9ee6e8bb 3709 case 20: /* fshto */
d614a513
PM
3710 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3711 return 1;
3712 }
5500b06c 3713 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3714 break;
3715 case 21: /* fslto */
d614a513
PM
3716 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3717 return 1;
3718 }
5500b06c 3719 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3720 break;
3721 case 22: /* fuhto */
d614a513
PM
3722 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3723 return 1;
3724 }
5500b06c 3725 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3726 break;
3727 case 23: /* fulto */
d614a513
PM
3728 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3729 return 1;
3730 }
5500b06c 3731 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3732 break;
b7bcbe95 3733 case 24: /* ftoui */
5500b06c 3734 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3735 break;
3736 case 25: /* ftouiz */
5500b06c 3737 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3738 break;
3739 case 26: /* ftosi */
5500b06c 3740 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3741 break;
3742 case 27: /* ftosiz */
5500b06c 3743 gen_vfp_tosiz(dp, 0);
b7bcbe95 3744 break;
9ee6e8bb 3745 case 28: /* ftosh */
d614a513
PM
3746 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3747 return 1;
3748 }
5500b06c 3749 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3750 break;
3751 case 29: /* ftosl */
d614a513
PM
3752 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3753 return 1;
3754 }
5500b06c 3755 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3756 break;
3757 case 30: /* ftouh */
d614a513
PM
3758 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3759 return 1;
3760 }
5500b06c 3761 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3762 break;
3763 case 31: /* ftoul */
d614a513
PM
3764 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3765 return 1;
3766 }
5500b06c 3767 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3768 break;
b7bcbe95 3769 default: /* undefined */
b7bcbe95
FB
3770 return 1;
3771 }
3772 break;
3773 default: /* undefined */
b7bcbe95
FB
3774 return 1;
3775 }
3776
3777 /* Write back the result. */
239c20c7
WN
3778 if (op == 15 && (rn >= 8 && rn <= 11)) {
3779 /* Comparison, do nothing. */
3780 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3781 (rn & 0x1e) == 0x6)) {
3782 /* VCVT double to int: always integer result.
3783 * VCVT double to half precision is always a single
3784 * precision result.
3785 */
b7bcbe95 3786 gen_mov_vreg_F0(0, rd);
239c20c7 3787 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3788 /* conversion */
3789 gen_mov_vreg_F0(!dp, rd);
239c20c7 3790 } else {
b7bcbe95 3791 gen_mov_vreg_F0(dp, rd);
239c20c7 3792 }
b7bcbe95
FB
3793
3794 /* break out of the loop if we have finished */
3795 if (veclen == 0)
3796 break;
3797
3798 if (op == 15 && delta_m == 0) {
3799 /* single source one-many */
3800 while (veclen--) {
3801 rd = ((rd + delta_d) & (bank_mask - 1))
3802 | (rd & bank_mask);
3803 gen_mov_vreg_F0(dp, rd);
3804 }
3805 break;
3806 }
3807 /* Setup the next operands. */
3808 veclen--;
3809 rd = ((rd + delta_d) & (bank_mask - 1))
3810 | (rd & bank_mask);
3811
3812 if (op == 15) {
3813 /* One source operand. */
3814 rm = ((rm + delta_m) & (bank_mask - 1))
3815 | (rm & bank_mask);
3816 gen_mov_F0_vreg(dp, rm);
3817 } else {
3818 /* Two source operands. */
3819 rn = ((rn + delta_d) & (bank_mask - 1))
3820 | (rn & bank_mask);
3821 gen_mov_F0_vreg(dp, rn);
3822 if (delta_m) {
3823 rm = ((rm + delta_m) & (bank_mask - 1))
3824 | (rm & bank_mask);
3825 gen_mov_F1_vreg(dp, rm);
3826 }
3827 }
3828 }
3829 }
3830 break;
3831 case 0xc:
3832 case 0xd:
8387da81 3833 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3834 /* two-register transfer */
3835 rn = (insn >> 16) & 0xf;
3836 rd = (insn >> 12) & 0xf;
3837 if (dp) {
9ee6e8bb
PB
3838 VFP_DREG_M(rm, insn);
3839 } else {
3840 rm = VFP_SREG_M(insn);
3841 }
b7bcbe95 3842
18c9b560 3843 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3844 /* vfp->arm */
3845 if (dp) {
4373f3ce
PB
3846 gen_mov_F0_vreg(0, rm * 2);
3847 tmp = gen_vfp_mrs();
3848 store_reg(s, rd, tmp);
3849 gen_mov_F0_vreg(0, rm * 2 + 1);
3850 tmp = gen_vfp_mrs();
3851 store_reg(s, rn, tmp);
b7bcbe95
FB
3852 } else {
3853 gen_mov_F0_vreg(0, rm);
4373f3ce 3854 tmp = gen_vfp_mrs();
8387da81 3855 store_reg(s, rd, tmp);
b7bcbe95 3856 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3857 tmp = gen_vfp_mrs();
8387da81 3858 store_reg(s, rn, tmp);
b7bcbe95
FB
3859 }
3860 } else {
3861 /* arm->vfp */
3862 if (dp) {
4373f3ce
PB
3863 tmp = load_reg(s, rd);
3864 gen_vfp_msr(tmp);
3865 gen_mov_vreg_F0(0, rm * 2);
3866 tmp = load_reg(s, rn);
3867 gen_vfp_msr(tmp);
3868 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3869 } else {
8387da81 3870 tmp = load_reg(s, rd);
4373f3ce 3871 gen_vfp_msr(tmp);
b7bcbe95 3872 gen_mov_vreg_F0(0, rm);
8387da81 3873 tmp = load_reg(s, rn);
4373f3ce 3874 gen_vfp_msr(tmp);
b7bcbe95
FB
3875 gen_mov_vreg_F0(0, rm + 1);
3876 }
3877 }
3878 } else {
3879 /* Load/store */
3880 rn = (insn >> 16) & 0xf;
3881 if (dp)
9ee6e8bb 3882 VFP_DREG_D(rd, insn);
b7bcbe95 3883 else
9ee6e8bb 3884 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3885 if ((insn & 0x01200000) == 0x01000000) {
3886 /* Single load/store */
3887 offset = (insn & 0xff) << 2;
3888 if ((insn & (1 << 23)) == 0)
3889 offset = -offset;
934814f1
PM
3890 if (s->thumb && rn == 15) {
3891 /* This is actually UNPREDICTABLE */
3892 addr = tcg_temp_new_i32();
3893 tcg_gen_movi_i32(addr, s->pc & ~2);
3894 } else {
3895 addr = load_reg(s, rn);
3896 }
312eea9f 3897 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3898 if (insn & (1 << 20)) {
312eea9f 3899 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3900 gen_mov_vreg_F0(dp, rd);
3901 } else {
3902 gen_mov_F0_vreg(dp, rd);
312eea9f 3903 gen_vfp_st(s, dp, addr);
b7bcbe95 3904 }
7d1b0095 3905 tcg_temp_free_i32(addr);
b7bcbe95
FB
3906 } else {
3907 /* load/store multiple */
934814f1 3908 int w = insn & (1 << 21);
b7bcbe95
FB
3909 if (dp)
3910 n = (insn >> 1) & 0x7f;
3911 else
3912 n = insn & 0xff;
3913
934814f1
PM
3914 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3915 /* P == U , W == 1 => UNDEF */
3916 return 1;
3917 }
3918 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3919 /* UNPREDICTABLE cases for bad immediates: we choose to
3920 * UNDEF to avoid generating huge numbers of TCG ops
3921 */
3922 return 1;
3923 }
3924 if (rn == 15 && w) {
3925 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3926 return 1;
3927 }
3928
3929 if (s->thumb && rn == 15) {
3930 /* This is actually UNPREDICTABLE */
3931 addr = tcg_temp_new_i32();
3932 tcg_gen_movi_i32(addr, s->pc & ~2);
3933 } else {
3934 addr = load_reg(s, rn);
3935 }
b7bcbe95 3936 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3937 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3938
3939 if (dp)
3940 offset = 8;
3941 else
3942 offset = 4;
3943 for (i = 0; i < n; i++) {
18c9b560 3944 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3945 /* load */
312eea9f 3946 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3947 gen_mov_vreg_F0(dp, rd + i);
3948 } else {
3949 /* store */
3950 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3951 gen_vfp_st(s, dp, addr);
b7bcbe95 3952 }
312eea9f 3953 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3954 }
934814f1 3955 if (w) {
b7bcbe95
FB
3956 /* writeback */
3957 if (insn & (1 << 24))
3958 offset = -offset * n;
3959 else if (dp && (insn & 1))
3960 offset = 4;
3961 else
3962 offset = 0;
3963
3964 if (offset != 0)
312eea9f
FN
3965 tcg_gen_addi_i32(addr, addr, offset);
3966 store_reg(s, rn, addr);
3967 } else {
7d1b0095 3968 tcg_temp_free_i32(addr);
b7bcbe95
FB
3969 }
3970 }
3971 }
3972 break;
3973 default:
3974 /* Should never happen. */
3975 return 1;
3976 }
3977 return 0;
3978}
3979
0a2461fa 3980static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3981{
6e256c93
FB
3982 TranslationBlock *tb;
3983
3984 tb = s->tb;
3985 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3986 tcg_gen_goto_tb(n);
eaed129d 3987 gen_set_pc_im(s, dest);
8cfd0495 3988 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3989 } else {
eaed129d 3990 gen_set_pc_im(s, dest);
57fec1fe 3991 tcg_gen_exit_tb(0);
6e256c93 3992 }
c53be334
FB
3993}
3994
8aaca4c0
FB
3995static inline void gen_jmp (DisasContext *s, uint32_t dest)
3996{
50225ad0 3997 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 3998 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3999 if (s->thumb)
d9ba4830
PB
4000 dest |= 1;
4001 gen_bx_im(s, dest);
8aaca4c0 4002 } else {
6e256c93 4003 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4004 s->is_jmp = DISAS_TB_JUMP;
4005 }
4006}
4007
39d5492a 4008static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4009{
ee097184 4010 if (x)
d9ba4830 4011 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4012 else
d9ba4830 4013 gen_sxth(t0);
ee097184 4014 if (y)
d9ba4830 4015 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4016 else
d9ba4830
PB
4017 gen_sxth(t1);
4018 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4019}
4020
4021/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4022static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4023{
b5ff1b31
FB
4024 uint32_t mask;
4025
4026 mask = 0;
4027 if (flags & (1 << 0))
4028 mask |= 0xff;
4029 if (flags & (1 << 1))
4030 mask |= 0xff00;
4031 if (flags & (1 << 2))
4032 mask |= 0xff0000;
4033 if (flags & (1 << 3))
4034 mask |= 0xff000000;
9ee6e8bb 4035
2ae23e75 4036 /* Mask out undefined bits. */
9ee6e8bb 4037 mask &= ~CPSR_RESERVED;
d614a513 4038 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4039 mask &= ~CPSR_T;
d614a513
PM
4040 }
4041 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4042 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4043 }
4044 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4045 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4046 }
4047 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4048 mask &= ~CPSR_IT;
d614a513 4049 }
4051e12c
PM
4050 /* Mask out execution state and reserved bits. */
4051 if (!spsr) {
4052 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4053 }
b5ff1b31
FB
4054 /* Mask out privileged bits. */
4055 if (IS_USER(s))
9ee6e8bb 4056 mask &= CPSR_USER;
b5ff1b31
FB
4057 return mask;
4058}
4059
2fbac54b 4060/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4061static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4062{
39d5492a 4063 TCGv_i32 tmp;
b5ff1b31
FB
4064 if (spsr) {
4065 /* ??? This is also undefined in system mode. */
4066 if (IS_USER(s))
4067 return 1;
d9ba4830
PB
4068
4069 tmp = load_cpu_field(spsr);
4070 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4071 tcg_gen_andi_i32(t0, t0, mask);
4072 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4073 store_cpu_field(tmp, spsr);
b5ff1b31 4074 } else {
2fbac54b 4075 gen_set_cpsr(t0, mask);
b5ff1b31 4076 }
7d1b0095 4077 tcg_temp_free_i32(t0);
b5ff1b31
FB
4078 gen_lookup_tb(s);
4079 return 0;
4080}
4081
2fbac54b
FN
4082/* Returns nonzero if access to the PSR is not permitted. */
4083static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4084{
39d5492a 4085 TCGv_i32 tmp;
7d1b0095 4086 tmp = tcg_temp_new_i32();
2fbac54b
FN
4087 tcg_gen_movi_i32(tmp, val);
4088 return gen_set_psr(s, mask, spsr, tmp);
4089}
4090
e9bb4aa9 4091/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 4092static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4093{
39d5492a 4094 TCGv_i32 tmp;
e9bb4aa9 4095 store_reg(s, 15, pc);
d9ba4830 4096 tmp = load_cpu_field(spsr);
4051e12c 4097 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 4098 tcg_temp_free_i32(tmp);
577bf808 4099 s->is_jmp = DISAS_JUMP;
b5ff1b31
FB
4100}
4101
b0109805 4102/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 4103static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 4104{
4051e12c 4105 gen_set_cpsr(cpsr, CPSR_ERET_MASK);
7d1b0095 4106 tcg_temp_free_i32(cpsr);
b0109805 4107 store_reg(s, 15, pc);
577bf808 4108 s->is_jmp = DISAS_JUMP;
9ee6e8bb 4109}
3b46e624 4110
9ee6e8bb
PB
/* Emit code for the hint encodings: YIELD, WFE, WFI, SEV, SEVL and NOP.
 * The blocking hints end the TB with a matching is_jmp disposition so the
 * main loop can act on them; SEV/SEVL currently behave as NOPs.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
        /* fall through: treated as a NOP for now */
    default: /* nop */
        break;
    }
}
99c475ab 4133
ad69471c 4134#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4135
39d5492a 4136static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4137{
4138 switch (size) {
dd8fbd78
FN
4139 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4140 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4141 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4142 default: abort();
9ee6e8bb 4143 }
9ee6e8bb
PB
4144}
4145
39d5492a 4146static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4147{
4148 switch (size) {
dd8fbd78
FN
4149 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4150 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4151 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4152 default: return;
4153 }
4154}
4155
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* Dispatch a Neon integer helper on (size << 1) | u: size selects the
 * 8/16/32-bit lane width and u selects the unsigned variant.  Expands to
 * "return 1" (signalling UNDEF to the caller) for unhandled combinations.
 * The _ENV form passes cpu_env to helpers that need the CPU state pointer.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4207
39d5492a 4208static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4209{
39d5492a 4210 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4211 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4212 return tmp;
9ee6e8bb
PB
4213}
4214
39d5492a 4215static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4216{
dd8fbd78 4217 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4218 tcg_temp_free_i32(var);
9ee6e8bb
PB
4219}
4220
39d5492a 4221static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4222{
39d5492a 4223 TCGv_i32 tmp;
9ee6e8bb 4224 if (size == 1) {
0fad6efc
PM
4225 tmp = neon_load_reg(reg & 7, reg >> 4);
4226 if (reg & 8) {
dd8fbd78 4227 gen_neon_dup_high16(tmp);
0fad6efc
PM
4228 } else {
4229 gen_neon_dup_low16(tmp);
dd8fbd78 4230 }
0fad6efc
PM
4231 } else {
4232 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4233 }
dd8fbd78 4234 return tmp;
9ee6e8bb
PB
4235}
4236
02acedf9 4237static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4238{
39d5492a 4239 TCGv_i32 tmp, tmp2;
600b828c 4240 if (!q && size == 2) {
02acedf9
PM
4241 return 1;
4242 }
4243 tmp = tcg_const_i32(rd);
4244 tmp2 = tcg_const_i32(rm);
4245 if (q) {
4246 switch (size) {
4247 case 0:
02da0b2d 4248 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4249 break;
4250 case 1:
02da0b2d 4251 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4252 break;
4253 case 2:
02da0b2d 4254 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4255 break;
4256 default:
4257 abort();
4258 }
4259 } else {
4260 switch (size) {
4261 case 0:
02da0b2d 4262 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4263 break;
4264 case 1:
02da0b2d 4265 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4266 break;
4267 default:
4268 abort();
4269 }
4270 }
4271 tcg_temp_free_i32(tmp);
4272 tcg_temp_free_i32(tmp2);
4273 return 0;
19457615
FN
4274}
4275
d68a6f3a 4276static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4277{
39d5492a 4278 TCGv_i32 tmp, tmp2;
600b828c 4279 if (!q && size == 2) {
d68a6f3a
PM
4280 return 1;
4281 }
4282 tmp = tcg_const_i32(rd);
4283 tmp2 = tcg_const_i32(rm);
4284 if (q) {
4285 switch (size) {
4286 case 0:
02da0b2d 4287 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4288 break;
4289 case 1:
02da0b2d 4290 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4291 break;
4292 case 2:
02da0b2d 4293 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4294 break;
4295 default:
4296 abort();
4297 }
4298 } else {
4299 switch (size) {
4300 case 0:
02da0b2d 4301 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4302 break;
4303 case 1:
02da0b2d 4304 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4305 break;
4306 default:
4307 abort();
4308 }
4309 }
4310 tcg_temp_free_i32(tmp);
4311 tcg_temp_free_i32(tmp2);
4312 return 0;
19457615
FN
4313}
4314
39d5492a 4315static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4316{
39d5492a 4317 TCGv_i32 rd, tmp;
19457615 4318
7d1b0095
PM
4319 rd = tcg_temp_new_i32();
4320 tmp = tcg_temp_new_i32();
19457615
FN
4321
4322 tcg_gen_shli_i32(rd, t0, 8);
4323 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4324 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4325 tcg_gen_or_i32(rd, rd, tmp);
4326
4327 tcg_gen_shri_i32(t1, t1, 8);
4328 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4329 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4330 tcg_gen_or_i32(t1, t1, tmp);
4331 tcg_gen_mov_i32(t0, rd);
4332
7d1b0095
PM
4333 tcg_temp_free_i32(tmp);
4334 tcg_temp_free_i32(rd);
19457615
FN
4335}
4336
39d5492a 4337static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4338{
39d5492a 4339 TCGv_i32 rd, tmp;
19457615 4340
7d1b0095
PM
4341 rd = tcg_temp_new_i32();
4342 tmp = tcg_temp_new_i32();
19457615
FN
4343
4344 tcg_gen_shli_i32(rd, t0, 16);
4345 tcg_gen_andi_i32(tmp, t1, 0xffff);
4346 tcg_gen_or_i32(rd, rd, tmp);
4347 tcg_gen_shri_i32(t1, t1, 16);
4348 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4349 tcg_gen_or_i32(t1, t1, tmp);
4350 tcg_gen_mov_i32(t0, rd);
4351
7d1b0095
PM
4352 tcg_temp_free_i32(tmp);
4353 tcg_temp_free_i32(rd);
19457615
FN
4354}
4355
4356
9ee6e8bb
PB
/* Per-op properties for the "load/store multiple structures" forms of
 * VLDn/VSTn, indexed by the op field (insn bits [11:8]; values > 10 UNDEF).
 */
static struct {
    int nregs;      /* number of D registers transferred */
    int interleave; /* element interleave factor; memory stride is
                       (1 << size) * interleave */
    int spacing;    /* register-number step between structure members */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4374
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  Handles three encodings: load/store multiple
   structures, load single element to all lanes, and load/store single
   element to one lane; finishes with optional base-register writeback.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved registers restart from the base address with
             * a per-register element offset.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: two 32-bit passes per D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit elements packed into one 32-bit word. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four bytes packed into one 32-bit word. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the selected lane.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise add register Rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4703
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* Take each bit from t where the corresponding bit of c is set,
     * and from f where it is clear.
     */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4711
39d5492a 4712static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4713{
4714 switch (size) {
4715 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4716 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 4717 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
4718 default: abort();
4719 }
4720}
4721
39d5492a 4722static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4723{
4724 switch (size) {
02da0b2d
PM
4725 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4726 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4727 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4728 default: abort();
4729 }
4730}
4731
39d5492a 4732static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4733{
4734 switch (size) {
02da0b2d
PM
4735 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4736 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4737 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4738 default: abort();
4739 }
4740}
4741
39d5492a 4742static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4743{
4744 switch (size) {
02da0b2d
PM
4745 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4746 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4747 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4748 default: abort();
4749 }
4750}
4751
/* Emit the shift used by the narrowing-shift insns: q selects the
 * rounding variant (rshl vs shl helpers), u the unsigned variant.
 * Only 16- and 32-bit lane sizes are valid here.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
4785
39d5492a 4786static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4787{
4788 if (u) {
4789 switch (size) {
4790 case 0: gen_helper_neon_widen_u8(dest, src); break;
4791 case 1: gen_helper_neon_widen_u16(dest, src); break;
4792 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4793 default: abort();
4794 }
4795 } else {
4796 switch (size) {
4797 case 0: gen_helper_neon_widen_s8(dest, src); break;
4798 case 1: gen_helper_neon_widen_s16(dest, src); break;
4799 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4800 default: abort();
4801 }
4802 }
7d1b0095 4803 tcg_temp_free_i32(src);
ad69471c
PB
4804}
4805
4806static inline void gen_neon_addl(int size)
4807{
4808 switch (size) {
4809 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4810 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4811 case 2: tcg_gen_add_i64(CPU_V001); break;
4812 default: abort();
4813 }
4814}
4815
4816static inline void gen_neon_subl(int size)
4817{
4818 switch (size) {
4819 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4820 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4821 case 2: tcg_gen_sub_i64(CPU_V001); break;
4822 default: abort();
4823 }
4824}
4825
a7812ae4 4826static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4827{
4828 switch (size) {
4829 case 0: gen_helper_neon_negl_u16(var, var); break;
4830 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4831 case 2:
4832 tcg_gen_neg_i64(var, var);
4833 break;
ad69471c
PB
4834 default: abort();
4835 }
4836}
4837
a7812ae4 4838static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4839{
4840 switch (size) {
02da0b2d
PM
4841 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4842 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4843 default: abort();
4844 }
4845}
4846
/* Widening multiply, dest = a * b, dispatched on (size << 1) | u.
 * For 8/16-bit lanes a helper is used; the 32-bit case uses the
 * host multiply generators.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* Signed 32x32->64; gen_muls_i64_i32 consumes a and b. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        /* Unsigned 32x32->64; gen_mulu_i64_i32 consumes a and b. */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4877
39d5492a
PM
4878static void gen_neon_narrow_op(int op, int u, int size,
4879 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4880{
4881 if (op) {
4882 if (u) {
4883 gen_neon_unarrow_sats(size, dest, src);
4884 } else {
4885 gen_neon_narrow(size, dest, src);
4886 }
4887 } else {
4888 if (u) {
4889 gen_neon_narrow_satu(size, dest, src);
4890 } else {
4891 gen_neon_narrow_sats(size, dest, src);
4892 }
4893 }
4894}
4895
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry has bit n set if the op accepts size value n; an op whose
 * entry has no bits set always UNDEFs (same convention as neon_2rm_sizes).
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4967
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Values with no symbolic name here (3, 29) have no entry
 * in neon_2rm_sizes and therefore UNDEF.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5034
5035static int neon_2rm_is_float_op(int op)
5036{
5037 /* Return true if this neon 2reg-misc op is float-to-float */
5038 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5039 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5040 op == NEON_2RM_VRINTM ||
5041 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5042 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5043}
5044
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5113
9ee6e8bb
PB
5114/* Translate a NEON data processing instruction. Return nonzero if the
5115 instruction is invalid.
ad69471c
PB
5116 We process data in a mixture of 32-bit and 64-bit chunks.
5117 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5118
7dcc1f89 5119static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5120{
5121 int op;
5122 int q;
5123 int rd, rn, rm;
5124 int size;
5125 int shift;
5126 int pass;
5127 int count;
5128 int pairwise;
5129 int u;
ca9a32e4 5130 uint32_t imm, mask;
39d5492a 5131 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5132 TCGv_i64 tmp64;
9ee6e8bb 5133
2c7ffc41
PM
5134 /* FIXME: this access check should not take precedence over UNDEF
5135 * for invalid encodings; we will generate incorrect syndrome information
5136 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5137 */
9dbbc748 5138 if (s->fp_excp_el) {
2c7ffc41 5139 gen_exception_insn(s, 4, EXCP_UDEF,
9dbbc748 5140 syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
2c7ffc41
PM
5141 return 0;
5142 }
5143
5df8bac1 5144 if (!s->vfp_enabled)
9ee6e8bb
PB
5145 return 1;
5146 q = (insn & (1 << 6)) != 0;
5147 u = (insn >> 24) & 1;
5148 VFP_DREG_D(rd, insn);
5149 VFP_DREG_N(rn, insn);
5150 VFP_DREG_M(rm, insn);
5151 size = (insn >> 20) & 3;
5152 if ((insn & (1 << 23)) == 0) {
5153 /* Three register same length. */
5154 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5155 /* Catch invalid op and bad size combinations: UNDEF */
5156 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5157 return 1;
5158 }
25f84f79
PM
5159 /* All insns of this form UNDEF for either this condition or the
5160 * superset of cases "Q==1"; we catch the latter later.
5161 */
5162 if (q && ((rd | rn | rm) & 1)) {
5163 return 1;
5164 }
f1ecb913
AB
5165 /*
5166 * The SHA-1/SHA-256 3-register instructions require special treatment
5167 * here, as their size field is overloaded as an op type selector, and
5168 * they all consume their input in a single pass.
5169 */
5170 if (op == NEON_3R_SHA) {
5171 if (!q) {
5172 return 1;
5173 }
5174 if (!u) { /* SHA-1 */
d614a513 5175 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5176 return 1;
5177 }
5178 tmp = tcg_const_i32(rd);
5179 tmp2 = tcg_const_i32(rn);
5180 tmp3 = tcg_const_i32(rm);
5181 tmp4 = tcg_const_i32(size);
5182 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5183 tcg_temp_free_i32(tmp4);
5184 } else { /* SHA-256 */
d614a513 5185 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5186 return 1;
5187 }
5188 tmp = tcg_const_i32(rd);
5189 tmp2 = tcg_const_i32(rn);
5190 tmp3 = tcg_const_i32(rm);
5191 switch (size) {
5192 case 0:
5193 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5194 break;
5195 case 1:
5196 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5197 break;
5198 case 2:
5199 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5200 break;
5201 }
5202 }
5203 tcg_temp_free_i32(tmp);
5204 tcg_temp_free_i32(tmp2);
5205 tcg_temp_free_i32(tmp3);
5206 return 0;
5207 }
62698be3
PM
5208 if (size == 3 && op != NEON_3R_LOGIC) {
5209 /* 64-bit element instructions. */
9ee6e8bb 5210 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5211 neon_load_reg64(cpu_V0, rn + pass);
5212 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5213 switch (op) {
62698be3 5214 case NEON_3R_VQADD:
9ee6e8bb 5215 if (u) {
02da0b2d
PM
5216 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5217 cpu_V0, cpu_V1);
2c0262af 5218 } else {
02da0b2d
PM
5219 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5220 cpu_V0, cpu_V1);
2c0262af 5221 }
9ee6e8bb 5222 break;
62698be3 5223 case NEON_3R_VQSUB:
9ee6e8bb 5224 if (u) {
02da0b2d
PM
5225 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5226 cpu_V0, cpu_V1);
ad69471c 5227 } else {
02da0b2d
PM
5228 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5229 cpu_V0, cpu_V1);
ad69471c
PB
5230 }
5231 break;
62698be3 5232 case NEON_3R_VSHL:
ad69471c
PB
5233 if (u) {
5234 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5235 } else {
5236 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5237 }
5238 break;
62698be3 5239 case NEON_3R_VQSHL:
ad69471c 5240 if (u) {
02da0b2d
PM
5241 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5242 cpu_V1, cpu_V0);
ad69471c 5243 } else {
02da0b2d
PM
5244 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5245 cpu_V1, cpu_V0);
ad69471c
PB
5246 }
5247 break;
62698be3 5248 case NEON_3R_VRSHL:
ad69471c
PB
5249 if (u) {
5250 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5251 } else {
ad69471c
PB
5252 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5253 }
5254 break;
62698be3 5255 case NEON_3R_VQRSHL:
ad69471c 5256 if (u) {
02da0b2d
PM
5257 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5258 cpu_V1, cpu_V0);
ad69471c 5259 } else {
02da0b2d
PM
5260 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5261 cpu_V1, cpu_V0);
1e8d4eec 5262 }
9ee6e8bb 5263 break;
62698be3 5264 case NEON_3R_VADD_VSUB:
9ee6e8bb 5265 if (u) {
ad69471c 5266 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5267 } else {
ad69471c 5268 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5269 }
5270 break;
5271 default:
5272 abort();
2c0262af 5273 }
ad69471c 5274 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5275 }
9ee6e8bb 5276 return 0;
2c0262af 5277 }
25f84f79 5278 pairwise = 0;
9ee6e8bb 5279 switch (op) {
62698be3
PM
5280 case NEON_3R_VSHL:
5281 case NEON_3R_VQSHL:
5282 case NEON_3R_VRSHL:
5283 case NEON_3R_VQRSHL:
9ee6e8bb 5284 {
ad69471c
PB
5285 int rtmp;
5286 /* Shift instruction operands are reversed. */
5287 rtmp = rn;
9ee6e8bb 5288 rn = rm;
ad69471c 5289 rm = rtmp;
9ee6e8bb 5290 }
2c0262af 5291 break;
25f84f79
PM
5292 case NEON_3R_VPADD:
5293 if (u) {
5294 return 1;
5295 }
5296 /* Fall through */
62698be3
PM
5297 case NEON_3R_VPMAX:
5298 case NEON_3R_VPMIN:
9ee6e8bb 5299 pairwise = 1;
2c0262af 5300 break;
25f84f79
PM
5301 case NEON_3R_FLOAT_ARITH:
5302 pairwise = (u && size < 2); /* if VPADD (float) */
5303 break;
5304 case NEON_3R_FLOAT_MINMAX:
5305 pairwise = u; /* if VPMIN/VPMAX (float) */
5306 break;
5307 case NEON_3R_FLOAT_CMP:
5308 if (!u && size) {
5309 /* no encoding for U=0 C=1x */
5310 return 1;
5311 }
5312 break;
5313 case NEON_3R_FLOAT_ACMP:
5314 if (!u) {
5315 return 1;
5316 }
5317 break;
505935fc
WN
5318 case NEON_3R_FLOAT_MISC:
5319 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5320 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5321 return 1;
5322 }
2c0262af 5323 break;
25f84f79
PM
5324 case NEON_3R_VMUL:
5325 if (u && (size != 0)) {
5326 /* UNDEF on invalid size for polynomial subcase */
5327 return 1;
5328 }
2c0262af 5329 break;
da97f52c 5330 case NEON_3R_VFM:
d614a513 5331 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5332 return 1;
5333 }
5334 break;
9ee6e8bb 5335 default:
2c0262af 5336 break;
9ee6e8bb 5337 }
dd8fbd78 5338
25f84f79
PM
5339 if (pairwise && q) {
5340 /* All the pairwise insns UNDEF if Q is set */
5341 return 1;
5342 }
5343
9ee6e8bb
PB
5344 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5345
5346 if (pairwise) {
5347 /* Pairwise. */
a5a14945
JR
5348 if (pass < 1) {
5349 tmp = neon_load_reg(rn, 0);
5350 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5351 } else {
a5a14945
JR
5352 tmp = neon_load_reg(rm, 0);
5353 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5354 }
5355 } else {
5356 /* Elementwise. */
dd8fbd78
FN
5357 tmp = neon_load_reg(rn, pass);
5358 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5359 }
5360 switch (op) {
62698be3 5361 case NEON_3R_VHADD:
9ee6e8bb
PB
5362 GEN_NEON_INTEGER_OP(hadd);
5363 break;
62698be3 5364 case NEON_3R_VQADD:
02da0b2d 5365 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5366 break;
62698be3 5367 case NEON_3R_VRHADD:
9ee6e8bb 5368 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5369 break;
62698be3 5370 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5371 switch ((u << 2) | size) {
5372 case 0: /* VAND */
dd8fbd78 5373 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5374 break;
5375 case 1: /* BIC */
f669df27 5376 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5377 break;
5378 case 2: /* VORR */
dd8fbd78 5379 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5380 break;
5381 case 3: /* VORN */
f669df27 5382 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5383 break;
5384 case 4: /* VEOR */
dd8fbd78 5385 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5386 break;
5387 case 5: /* VBSL */
dd8fbd78
FN
5388 tmp3 = neon_load_reg(rd, pass);
5389 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5390 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5391 break;
5392 case 6: /* VBIT */
dd8fbd78
FN
5393 tmp3 = neon_load_reg(rd, pass);
5394 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5395 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5396 break;
5397 case 7: /* VBIF */
dd8fbd78
FN
5398 tmp3 = neon_load_reg(rd, pass);
5399 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5400 tcg_temp_free_i32(tmp3);
9ee6e8bb 5401 break;
2c0262af
FB
5402 }
5403 break;
62698be3 5404 case NEON_3R_VHSUB:
9ee6e8bb
PB
5405 GEN_NEON_INTEGER_OP(hsub);
5406 break;
62698be3 5407 case NEON_3R_VQSUB:
02da0b2d 5408 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5409 break;
62698be3 5410 case NEON_3R_VCGT:
9ee6e8bb
PB
5411 GEN_NEON_INTEGER_OP(cgt);
5412 break;
62698be3 5413 case NEON_3R_VCGE:
9ee6e8bb
PB
5414 GEN_NEON_INTEGER_OP(cge);
5415 break;
62698be3 5416 case NEON_3R_VSHL:
ad69471c 5417 GEN_NEON_INTEGER_OP(shl);
2c0262af 5418 break;
62698be3 5419 case NEON_3R_VQSHL:
02da0b2d 5420 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5421 break;
62698be3 5422 case NEON_3R_VRSHL:
ad69471c 5423 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5424 break;
62698be3 5425 case NEON_3R_VQRSHL:
02da0b2d 5426 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5427 break;
62698be3 5428 case NEON_3R_VMAX:
9ee6e8bb
PB
5429 GEN_NEON_INTEGER_OP(max);
5430 break;
62698be3 5431 case NEON_3R_VMIN:
9ee6e8bb
PB
5432 GEN_NEON_INTEGER_OP(min);
5433 break;
62698be3 5434 case NEON_3R_VABD:
9ee6e8bb
PB
5435 GEN_NEON_INTEGER_OP(abd);
5436 break;
62698be3 5437 case NEON_3R_VABA:
9ee6e8bb 5438 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5439 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5440 tmp2 = neon_load_reg(rd, pass);
5441 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5442 break;
62698be3 5443 case NEON_3R_VADD_VSUB:
9ee6e8bb 5444 if (!u) { /* VADD */
62698be3 5445 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5446 } else { /* VSUB */
5447 switch (size) {
dd8fbd78
FN
5448 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5449 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5450 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5451 default: abort();
9ee6e8bb
PB
5452 }
5453 }
5454 break;
62698be3 5455 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5456 if (!u) { /* VTST */
5457 switch (size) {
dd8fbd78
FN
5458 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5459 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5460 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5461 default: abort();
9ee6e8bb
PB
5462 }
5463 } else { /* VCEQ */
5464 switch (size) {
dd8fbd78
FN
5465 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5466 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5467 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5468 default: abort();
9ee6e8bb
PB
5469 }
5470 }
5471 break;
62698be3 5472 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5473 switch (size) {
dd8fbd78
FN
5474 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5475 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5476 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5477 default: abort();
9ee6e8bb 5478 }
7d1b0095 5479 tcg_temp_free_i32(tmp2);
dd8fbd78 5480 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5481 if (u) { /* VMLS */
dd8fbd78 5482 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5483 } else { /* VMLA */
dd8fbd78 5484 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5485 }
5486 break;
62698be3 5487 case NEON_3R_VMUL:
9ee6e8bb 5488 if (u) { /* polynomial */
dd8fbd78 5489 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5490 } else { /* Integer */
5491 switch (size) {
dd8fbd78
FN
5492 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5493 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5494 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5495 default: abort();
9ee6e8bb
PB
5496 }
5497 }
5498 break;
62698be3 5499 case NEON_3R_VPMAX:
9ee6e8bb
PB
5500 GEN_NEON_INTEGER_OP(pmax);
5501 break;
62698be3 5502 case NEON_3R_VPMIN:
9ee6e8bb
PB
5503 GEN_NEON_INTEGER_OP(pmin);
5504 break;
62698be3 5505 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5506 if (!u) { /* VQDMULH */
5507 switch (size) {
02da0b2d
PM
5508 case 1:
5509 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5510 break;
5511 case 2:
5512 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5513 break;
62698be3 5514 default: abort();
9ee6e8bb 5515 }
62698be3 5516 } else { /* VQRDMULH */
9ee6e8bb 5517 switch (size) {
02da0b2d
PM
5518 case 1:
5519 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5520 break;
5521 case 2:
5522 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5523 break;
62698be3 5524 default: abort();
9ee6e8bb
PB
5525 }
5526 }
5527 break;
62698be3 5528 case NEON_3R_VPADD:
9ee6e8bb 5529 switch (size) {
dd8fbd78
FN
5530 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5531 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5532 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5533 default: abort();
9ee6e8bb
PB
5534 }
5535 break;
62698be3 5536 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5537 {
5538 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5539 switch ((u << 2) | size) {
5540 case 0: /* VADD */
aa47cfdd
PM
5541 case 4: /* VPADD */
5542 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5543 break;
5544 case 2: /* VSUB */
aa47cfdd 5545 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5546 break;
5547 case 6: /* VABD */
aa47cfdd 5548 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5549 break;
5550 default:
62698be3 5551 abort();
9ee6e8bb 5552 }
aa47cfdd 5553 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5554 break;
aa47cfdd 5555 }
62698be3 5556 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5557 {
5558 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5559 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5560 if (!u) {
7d1b0095 5561 tcg_temp_free_i32(tmp2);
dd8fbd78 5562 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5563 if (size == 0) {
aa47cfdd 5564 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5565 } else {
aa47cfdd 5566 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5567 }
5568 }
aa47cfdd 5569 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5570 break;
aa47cfdd 5571 }
62698be3 5572 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5573 {
5574 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5575 if (!u) {
aa47cfdd 5576 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5577 } else {
aa47cfdd
PM
5578 if (size == 0) {
5579 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5580 } else {
5581 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5582 }
b5ff1b31 5583 }
aa47cfdd 5584 tcg_temp_free_ptr(fpstatus);
2c0262af 5585 break;
aa47cfdd 5586 }
62698be3 5587 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5588 {
5589 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5590 if (size == 0) {
5591 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5592 } else {
5593 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5594 }
5595 tcg_temp_free_ptr(fpstatus);
2c0262af 5596 break;
aa47cfdd 5597 }
62698be3 5598 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5599 {
5600 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5601 if (size == 0) {
f71a2ae5 5602 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5603 } else {
f71a2ae5 5604 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5605 }
5606 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5607 break;
aa47cfdd 5608 }
505935fc
WN
5609 case NEON_3R_FLOAT_MISC:
5610 if (u) {
5611 /* VMAXNM/VMINNM */
5612 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5613 if (size == 0) {
f71a2ae5 5614 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5615 } else {
f71a2ae5 5616 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5617 }
5618 tcg_temp_free_ptr(fpstatus);
5619 } else {
5620 if (size == 0) {
5621 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5622 } else {
5623 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5624 }
5625 }
2c0262af 5626 break;
da97f52c
PM
5627 case NEON_3R_VFM:
5628 {
5629 /* VFMA, VFMS: fused multiply-add */
5630 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5631 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5632 if (size) {
5633 /* VFMS */
5634 gen_helper_vfp_negs(tmp, tmp);
5635 }
5636 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5637 tcg_temp_free_i32(tmp3);
5638 tcg_temp_free_ptr(fpstatus);
5639 break;
5640 }
9ee6e8bb
PB
5641 default:
5642 abort();
2c0262af 5643 }
7d1b0095 5644 tcg_temp_free_i32(tmp2);
dd8fbd78 5645
9ee6e8bb
PB
5646 /* Save the result. For elementwise operations we can put it
5647 straight into the destination register. For pairwise operations
5648 we have to be careful to avoid clobbering the source operands. */
5649 if (pairwise && rd == rm) {
dd8fbd78 5650 neon_store_scratch(pass, tmp);
9ee6e8bb 5651 } else {
dd8fbd78 5652 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5653 }
5654
5655 } /* for pass */
5656 if (pairwise && rd == rm) {
5657 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5658 tmp = neon_load_scratch(pass);
5659 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5660 }
5661 }
ad69471c 5662 /* End of 3 register same size operations. */
9ee6e8bb
PB
5663 } else if (insn & (1 << 4)) {
5664 if ((insn & 0x00380080) != 0) {
5665 /* Two registers and shift. */
5666 op = (insn >> 8) & 0xf;
5667 if (insn & (1 << 7)) {
cc13115b
PM
5668 /* 64-bit shift. */
5669 if (op > 7) {
5670 return 1;
5671 }
9ee6e8bb
PB
5672 size = 3;
5673 } else {
5674 size = 2;
5675 while ((insn & (1 << (size + 19))) == 0)
5676 size--;
5677 }
5678 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5679 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5680 by immediate using the variable shift operations. */
5681 if (op < 8) {
5682 /* Shift by immediate:
5683 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5684 if (q && ((rd | rm) & 1)) {
5685 return 1;
5686 }
5687 if (!u && (op == 4 || op == 6)) {
5688 return 1;
5689 }
9ee6e8bb
PB
5690 /* Right shifts are encoded as N - shift, where N is the
5691 element size in bits. */
5692 if (op <= 4)
5693 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5694 if (size == 3) {
5695 count = q + 1;
5696 } else {
5697 count = q ? 4: 2;
5698 }
5699 switch (size) {
5700 case 0:
5701 imm = (uint8_t) shift;
5702 imm |= imm << 8;
5703 imm |= imm << 16;
5704 break;
5705 case 1:
5706 imm = (uint16_t) shift;
5707 imm |= imm << 16;
5708 break;
5709 case 2:
5710 case 3:
5711 imm = shift;
5712 break;
5713 default:
5714 abort();
5715 }
5716
5717 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5718 if (size == 3) {
5719 neon_load_reg64(cpu_V0, rm + pass);
5720 tcg_gen_movi_i64(cpu_V1, imm);
5721 switch (op) {
5722 case 0: /* VSHR */
5723 case 1: /* VSRA */
5724 if (u)
5725 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5726 else
ad69471c 5727 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5728 break;
ad69471c
PB
5729 case 2: /* VRSHR */
5730 case 3: /* VRSRA */
5731 if (u)
5732 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5733 else
ad69471c 5734 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5735 break;
ad69471c 5736 case 4: /* VSRI */
ad69471c
PB
5737 case 5: /* VSHL, VSLI */
5738 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5739 break;
0322b26e 5740 case 6: /* VQSHLU */
02da0b2d
PM
5741 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5742 cpu_V0, cpu_V1);
ad69471c 5743 break;
0322b26e
PM
5744 case 7: /* VQSHL */
5745 if (u) {
02da0b2d 5746 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5747 cpu_V0, cpu_V1);
5748 } else {
02da0b2d 5749 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5750 cpu_V0, cpu_V1);
5751 }
9ee6e8bb 5752 break;
9ee6e8bb 5753 }
ad69471c
PB
5754 if (op == 1 || op == 3) {
5755 /* Accumulate. */
5371cb81 5756 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5757 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5758 } else if (op == 4 || (op == 5 && u)) {
5759 /* Insert */
923e6509
CL
5760 neon_load_reg64(cpu_V1, rd + pass);
5761 uint64_t mask;
5762 if (shift < -63 || shift > 63) {
5763 mask = 0;
5764 } else {
5765 if (op == 4) {
5766 mask = 0xffffffffffffffffull >> -shift;
5767 } else {
5768 mask = 0xffffffffffffffffull << shift;
5769 }
5770 }
5771 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5772 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5773 }
5774 neon_store_reg64(cpu_V0, rd + pass);
5775 } else { /* size < 3 */
5776 /* Operands in T0 and T1. */
dd8fbd78 5777 tmp = neon_load_reg(rm, pass);
7d1b0095 5778 tmp2 = tcg_temp_new_i32();
dd8fbd78 5779 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5780 switch (op) {
5781 case 0: /* VSHR */
5782 case 1: /* VSRA */
5783 GEN_NEON_INTEGER_OP(shl);
5784 break;
5785 case 2: /* VRSHR */
5786 case 3: /* VRSRA */
5787 GEN_NEON_INTEGER_OP(rshl);
5788 break;
5789 case 4: /* VSRI */
ad69471c
PB
5790 case 5: /* VSHL, VSLI */
5791 switch (size) {
dd8fbd78
FN
5792 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5793 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5794 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5795 default: abort();
ad69471c
PB
5796 }
5797 break;
0322b26e 5798 case 6: /* VQSHLU */
ad69471c 5799 switch (size) {
0322b26e 5800 case 0:
02da0b2d
PM
5801 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5802 tmp, tmp2);
0322b26e
PM
5803 break;
5804 case 1:
02da0b2d
PM
5805 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5806 tmp, tmp2);
0322b26e
PM
5807 break;
5808 case 2:
02da0b2d
PM
5809 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5810 tmp, tmp2);
0322b26e
PM
5811 break;
5812 default:
cc13115b 5813 abort();
ad69471c
PB
5814 }
5815 break;
0322b26e 5816 case 7: /* VQSHL */
02da0b2d 5817 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5818 break;
ad69471c 5819 }
7d1b0095 5820 tcg_temp_free_i32(tmp2);
ad69471c
PB
5821
5822 if (op == 1 || op == 3) {
5823 /* Accumulate. */
dd8fbd78 5824 tmp2 = neon_load_reg(rd, pass);
5371cb81 5825 gen_neon_add(size, tmp, tmp2);
7d1b0095 5826 tcg_temp_free_i32(tmp2);
ad69471c
PB
5827 } else if (op == 4 || (op == 5 && u)) {
5828 /* Insert */
5829 switch (size) {
5830 case 0:
5831 if (op == 4)
ca9a32e4 5832 mask = 0xff >> -shift;
ad69471c 5833 else
ca9a32e4
JR
5834 mask = (uint8_t)(0xff << shift);
5835 mask |= mask << 8;
5836 mask |= mask << 16;
ad69471c
PB
5837 break;
5838 case 1:
5839 if (op == 4)
ca9a32e4 5840 mask = 0xffff >> -shift;
ad69471c 5841 else
ca9a32e4
JR
5842 mask = (uint16_t)(0xffff << shift);
5843 mask |= mask << 16;
ad69471c
PB
5844 break;
5845 case 2:
ca9a32e4
JR
5846 if (shift < -31 || shift > 31) {
5847 mask = 0;
5848 } else {
5849 if (op == 4)
5850 mask = 0xffffffffu >> -shift;
5851 else
5852 mask = 0xffffffffu << shift;
5853 }
ad69471c
PB
5854 break;
5855 default:
5856 abort();
5857 }
dd8fbd78 5858 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5859 tcg_gen_andi_i32(tmp, tmp, mask);
5860 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5861 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5862 tcg_temp_free_i32(tmp2);
ad69471c 5863 }
dd8fbd78 5864 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5865 }
5866 } /* for pass */
5867 } else if (op < 10) {
ad69471c 5868 /* Shift by immediate and narrow:
9ee6e8bb 5869 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5870 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5871 if (rm & 1) {
5872 return 1;
5873 }
9ee6e8bb
PB
5874 shift = shift - (1 << (size + 3));
5875 size++;
92cdfaeb 5876 if (size == 3) {
a7812ae4 5877 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5878 neon_load_reg64(cpu_V0, rm);
5879 neon_load_reg64(cpu_V1, rm + 1);
5880 for (pass = 0; pass < 2; pass++) {
5881 TCGv_i64 in;
5882 if (pass == 0) {
5883 in = cpu_V0;
5884 } else {
5885 in = cpu_V1;
5886 }
ad69471c 5887 if (q) {
0b36f4cd 5888 if (input_unsigned) {
92cdfaeb 5889 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5890 } else {
92cdfaeb 5891 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5892 }
ad69471c 5893 } else {
0b36f4cd 5894 if (input_unsigned) {
92cdfaeb 5895 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5896 } else {
92cdfaeb 5897 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5898 }
ad69471c 5899 }
7d1b0095 5900 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5901 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5902 neon_store_reg(rd, pass, tmp);
5903 } /* for pass */
5904 tcg_temp_free_i64(tmp64);
5905 } else {
5906 if (size == 1) {
5907 imm = (uint16_t)shift;
5908 imm |= imm << 16;
2c0262af 5909 } else {
92cdfaeb
PM
5910 /* size == 2 */
5911 imm = (uint32_t)shift;
5912 }
5913 tmp2 = tcg_const_i32(imm);
5914 tmp4 = neon_load_reg(rm + 1, 0);
5915 tmp5 = neon_load_reg(rm + 1, 1);
5916 for (pass = 0; pass < 2; pass++) {
5917 if (pass == 0) {
5918 tmp = neon_load_reg(rm, 0);
5919 } else {
5920 tmp = tmp4;
5921 }
0b36f4cd
CL
5922 gen_neon_shift_narrow(size, tmp, tmp2, q,
5923 input_unsigned);
92cdfaeb
PM
5924 if (pass == 0) {
5925 tmp3 = neon_load_reg(rm, 1);
5926 } else {
5927 tmp3 = tmp5;
5928 }
0b36f4cd
CL
5929 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5930 input_unsigned);
36aa55dc 5931 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5932 tcg_temp_free_i32(tmp);
5933 tcg_temp_free_i32(tmp3);
5934 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5935 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5936 neon_store_reg(rd, pass, tmp);
5937 } /* for pass */
c6067f04 5938 tcg_temp_free_i32(tmp2);
b75263d6 5939 }
9ee6e8bb 5940 } else if (op == 10) {
cc13115b
PM
5941 /* VSHLL, VMOVL */
5942 if (q || (rd & 1)) {
9ee6e8bb 5943 return 1;
cc13115b 5944 }
ad69471c
PB
5945 tmp = neon_load_reg(rm, 0);
5946 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5947 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5948 if (pass == 1)
5949 tmp = tmp2;
5950
5951 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5952
9ee6e8bb
PB
5953 if (shift != 0) {
5954 /* The shift is less than the width of the source
ad69471c
PB
5955 type, so we can just shift the whole register. */
5956 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5957 /* Widen the result of shift: we need to clear
5958 * the potential overflow bits resulting from
5959 * left bits of the narrow input appearing as
5960 * right bits of left the neighbour narrow
5961 * input. */
ad69471c
PB
5962 if (size < 2 || !u) {
5963 uint64_t imm64;
5964 if (size == 0) {
5965 imm = (0xffu >> (8 - shift));
5966 imm |= imm << 16;
acdf01ef 5967 } else if (size == 1) {
ad69471c 5968 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5969 } else {
5970 /* size == 2 */
5971 imm = 0xffffffff >> (32 - shift);
5972 }
5973 if (size < 2) {
5974 imm64 = imm | (((uint64_t)imm) << 32);
5975 } else {
5976 imm64 = imm;
9ee6e8bb 5977 }
acdf01ef 5978 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5979 }
5980 }
ad69471c 5981 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5982 }
f73534a5 5983 } else if (op >= 14) {
9ee6e8bb 5984 /* VCVT fixed-point. */
cc13115b
PM
5985 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5986 return 1;
5987 }
f73534a5
PM
5988 /* We have already masked out the must-be-1 top bit of imm6,
5989 * hence this 32-shift where the ARM ARM has 64-imm6.
5990 */
5991 shift = 32 - shift;
9ee6e8bb 5992 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5993 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5994 if (!(op & 1)) {
9ee6e8bb 5995 if (u)
5500b06c 5996 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5997 else
5500b06c 5998 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5999 } else {
6000 if (u)
5500b06c 6001 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6002 else
5500b06c 6003 gen_vfp_tosl(0, shift, 1);
2c0262af 6004 }
4373f3ce 6005 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6006 }
6007 } else {
9ee6e8bb
PB
6008 return 1;
6009 }
6010 } else { /* (insn & 0x00380080) == 0 */
6011 int invert;
7d80fee5
PM
6012 if (q && (rd & 1)) {
6013 return 1;
6014 }
9ee6e8bb
PB
6015
6016 op = (insn >> 8) & 0xf;
6017 /* One register and immediate. */
6018 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6019 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6020 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6021 * We choose to not special-case this and will behave as if a
6022 * valid constant encoding of 0 had been given.
6023 */
9ee6e8bb
PB
6024 switch (op) {
6025 case 0: case 1:
6026 /* no-op */
6027 break;
6028 case 2: case 3:
6029 imm <<= 8;
6030 break;
6031 case 4: case 5:
6032 imm <<= 16;
6033 break;
6034 case 6: case 7:
6035 imm <<= 24;
6036 break;
6037 case 8: case 9:
6038 imm |= imm << 16;
6039 break;
6040 case 10: case 11:
6041 imm = (imm << 8) | (imm << 24);
6042 break;
6043 case 12:
8e31209e 6044 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6045 break;
6046 case 13:
6047 imm = (imm << 16) | 0xffff;
6048 break;
6049 case 14:
6050 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6051 if (invert)
6052 imm = ~imm;
6053 break;
6054 case 15:
7d80fee5
PM
6055 if (invert) {
6056 return 1;
6057 }
9ee6e8bb
PB
6058 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6059 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6060 break;
6061 }
6062 if (invert)
6063 imm = ~imm;
6064
9ee6e8bb
PB
6065 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6066 if (op & 1 && op < 12) {
ad69471c 6067 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6068 if (invert) {
6069 /* The immediate value has already been inverted, so
6070 BIC becomes AND. */
ad69471c 6071 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6072 } else {
ad69471c 6073 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6074 }
9ee6e8bb 6075 } else {
ad69471c 6076 /* VMOV, VMVN. */
7d1b0095 6077 tmp = tcg_temp_new_i32();
9ee6e8bb 6078 if (op == 14 && invert) {
a5a14945 6079 int n;
ad69471c
PB
6080 uint32_t val;
6081 val = 0;
9ee6e8bb
PB
6082 for (n = 0; n < 4; n++) {
6083 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6084 val |= 0xff << (n * 8);
9ee6e8bb 6085 }
ad69471c
PB
6086 tcg_gen_movi_i32(tmp, val);
6087 } else {
6088 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6089 }
9ee6e8bb 6090 }
ad69471c 6091 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6092 }
6093 }
e4b3861d 6094 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6095 if (size != 3) {
6096 op = (insn >> 8) & 0xf;
6097 if ((insn & (1 << 6)) == 0) {
6098 /* Three registers of different lengths. */
6099 int src1_wide;
6100 int src2_wide;
6101 int prewiden;
526d0096
PM
6102 /* undefreq: bit 0 : UNDEF if size == 0
6103 * bit 1 : UNDEF if size == 1
6104 * bit 2 : UNDEF if size == 2
6105 * bit 3 : UNDEF if U == 1
6106 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6107 */
6108 int undefreq;
6109 /* prewiden, src1_wide, src2_wide, undefreq */
6110 static const int neon_3reg_wide[16][4] = {
6111 {1, 0, 0, 0}, /* VADDL */
6112 {1, 1, 0, 0}, /* VADDW */
6113 {1, 0, 0, 0}, /* VSUBL */
6114 {1, 1, 0, 0}, /* VSUBW */
6115 {0, 1, 1, 0}, /* VADDHN */
6116 {0, 0, 0, 0}, /* VABAL */
6117 {0, 1, 1, 0}, /* VSUBHN */
6118 {0, 0, 0, 0}, /* VABDL */
6119 {0, 0, 0, 0}, /* VMLAL */
526d0096 6120 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6121 {0, 0, 0, 0}, /* VMLSL */
526d0096 6122 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6123 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6124 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6125 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6126 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6127 };
6128
6129 prewiden = neon_3reg_wide[op][0];
6130 src1_wide = neon_3reg_wide[op][1];
6131 src2_wide = neon_3reg_wide[op][2];
695272dc 6132 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6133
526d0096
PM
6134 if ((undefreq & (1 << size)) ||
6135 ((undefreq & 8) && u)) {
695272dc
PM
6136 return 1;
6137 }
6138 if ((src1_wide && (rn & 1)) ||
6139 (src2_wide && (rm & 1)) ||
6140 (!src2_wide && (rd & 1))) {
ad69471c 6141 return 1;
695272dc 6142 }
ad69471c 6143
4e624eda
PM
6144 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6145 * outside the loop below as it only performs a single pass.
6146 */
6147 if (op == 14 && size == 2) {
6148 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6149
d614a513 6150 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6151 return 1;
6152 }
6153 tcg_rn = tcg_temp_new_i64();
6154 tcg_rm = tcg_temp_new_i64();
6155 tcg_rd = tcg_temp_new_i64();
6156 neon_load_reg64(tcg_rn, rn);
6157 neon_load_reg64(tcg_rm, rm);
6158 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6159 neon_store_reg64(tcg_rd, rd);
6160 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6161 neon_store_reg64(tcg_rd, rd + 1);
6162 tcg_temp_free_i64(tcg_rn);
6163 tcg_temp_free_i64(tcg_rm);
6164 tcg_temp_free_i64(tcg_rd);
6165 return 0;
6166 }
6167
9ee6e8bb
PB
6168 /* Avoid overlapping operands. Wide source operands are
6169 always aligned so will never overlap with wide
6170 destinations in problematic ways. */
8f8e3aa4 6171 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6172 tmp = neon_load_reg(rm, 1);
6173 neon_store_scratch(2, tmp);
8f8e3aa4 6174 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6175 tmp = neon_load_reg(rn, 1);
6176 neon_store_scratch(2, tmp);
9ee6e8bb 6177 }
39d5492a 6178 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6179 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6180 if (src1_wide) {
6181 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6182 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6183 } else {
ad69471c 6184 if (pass == 1 && rd == rn) {
dd8fbd78 6185 tmp = neon_load_scratch(2);
9ee6e8bb 6186 } else {
ad69471c
PB
6187 tmp = neon_load_reg(rn, pass);
6188 }
6189 if (prewiden) {
6190 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6191 }
6192 }
ad69471c
PB
6193 if (src2_wide) {
6194 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6195 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6196 } else {
ad69471c 6197 if (pass == 1 && rd == rm) {
dd8fbd78 6198 tmp2 = neon_load_scratch(2);
9ee6e8bb 6199 } else {
ad69471c
PB
6200 tmp2 = neon_load_reg(rm, pass);
6201 }
6202 if (prewiden) {
6203 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6204 }
9ee6e8bb
PB
6205 }
6206 switch (op) {
6207 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6208 gen_neon_addl(size);
9ee6e8bb 6209 break;
79b0e534 6210 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6211 gen_neon_subl(size);
9ee6e8bb
PB
6212 break;
6213 case 5: case 7: /* VABAL, VABDL */
6214 switch ((size << 1) | u) {
ad69471c
PB
6215 case 0:
6216 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6217 break;
6218 case 1:
6219 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6220 break;
6221 case 2:
6222 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6223 break;
6224 case 3:
6225 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6226 break;
6227 case 4:
6228 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6229 break;
6230 case 5:
6231 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6232 break;
9ee6e8bb
PB
6233 default: abort();
6234 }
7d1b0095
PM
6235 tcg_temp_free_i32(tmp2);
6236 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6237 break;
6238 case 8: case 9: case 10: case 11: case 12: case 13:
6239 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6240 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6241 break;
6242 case 14: /* Polynomial VMULL */
e5ca24cb 6243 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6244 tcg_temp_free_i32(tmp2);
6245 tcg_temp_free_i32(tmp);
e5ca24cb 6246 break;
695272dc
PM
6247 default: /* 15 is RESERVED: caught earlier */
6248 abort();
9ee6e8bb 6249 }
ebcd88ce
PM
6250 if (op == 13) {
6251 /* VQDMULL */
6252 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6253 neon_store_reg64(cpu_V0, rd + pass);
6254 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6255 /* Accumulate. */
ebcd88ce 6256 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6257 switch (op) {
4dc064e6
PM
6258 case 10: /* VMLSL */
6259 gen_neon_negl(cpu_V0, size);
6260 /* Fall through */
6261 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6262 gen_neon_addl(size);
9ee6e8bb
PB
6263 break;
6264 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6265 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6266 if (op == 11) {
6267 gen_neon_negl(cpu_V0, size);
6268 }
ad69471c
PB
6269 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6270 break;
9ee6e8bb
PB
6271 default:
6272 abort();
6273 }
ad69471c 6274 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6275 } else if (op == 4 || op == 6) {
6276 /* Narrowing operation. */
7d1b0095 6277 tmp = tcg_temp_new_i32();
79b0e534 6278 if (!u) {
9ee6e8bb 6279 switch (size) {
ad69471c
PB
6280 case 0:
6281 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6282 break;
6283 case 1:
6284 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6285 break;
6286 case 2:
6287 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6288 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6289 break;
9ee6e8bb
PB
6290 default: abort();
6291 }
6292 } else {
6293 switch (size) {
ad69471c
PB
6294 case 0:
6295 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6296 break;
6297 case 1:
6298 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6299 break;
6300 case 2:
6301 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6302 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6303 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6304 break;
9ee6e8bb
PB
6305 default: abort();
6306 }
6307 }
ad69471c
PB
6308 if (pass == 0) {
6309 tmp3 = tmp;
6310 } else {
6311 neon_store_reg(rd, 0, tmp3);
6312 neon_store_reg(rd, 1, tmp);
6313 }
9ee6e8bb
PB
6314 } else {
6315 /* Write back the result. */
ad69471c 6316 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6317 }
6318 }
6319 } else {
3e3326df
PM
6320 /* Two registers and a scalar. NB that for ops of this form
6321 * the ARM ARM labels bit 24 as Q, but it is in our variable
6322 * 'u', not 'q'.
6323 */
6324 if (size == 0) {
6325 return 1;
6326 }
9ee6e8bb 6327 switch (op) {
9ee6e8bb 6328 case 1: /* Float VMLA scalar */
9ee6e8bb 6329 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6330 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6331 if (size == 1) {
6332 return 1;
6333 }
6334 /* fall through */
6335 case 0: /* Integer VMLA scalar */
6336 case 4: /* Integer VMLS scalar */
6337 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6338 case 12: /* VQDMULH scalar */
6339 case 13: /* VQRDMULH scalar */
3e3326df
PM
6340 if (u && ((rd | rn) & 1)) {
6341 return 1;
6342 }
dd8fbd78
FN
6343 tmp = neon_get_scalar(size, rm);
6344 neon_store_scratch(0, tmp);
9ee6e8bb 6345 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6346 tmp = neon_load_scratch(0);
6347 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6348 if (op == 12) {
6349 if (size == 1) {
02da0b2d 6350 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6351 } else {
02da0b2d 6352 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6353 }
6354 } else if (op == 13) {
6355 if (size == 1) {
02da0b2d 6356 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6357 } else {
02da0b2d 6358 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6359 }
6360 } else if (op & 1) {
aa47cfdd
PM
6361 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6362 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6363 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6364 } else {
6365 switch (size) {
dd8fbd78
FN
6366 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6367 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6368 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6369 default: abort();
9ee6e8bb
PB
6370 }
6371 }
7d1b0095 6372 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6373 if (op < 8) {
6374 /* Accumulate. */
dd8fbd78 6375 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6376 switch (op) {
6377 case 0:
dd8fbd78 6378 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6379 break;
6380 case 1:
aa47cfdd
PM
6381 {
6382 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6383 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6384 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6385 break;
aa47cfdd 6386 }
9ee6e8bb 6387 case 4:
dd8fbd78 6388 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6389 break;
6390 case 5:
aa47cfdd
PM
6391 {
6392 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6393 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6394 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6395 break;
aa47cfdd 6396 }
9ee6e8bb
PB
6397 default:
6398 abort();
6399 }
7d1b0095 6400 tcg_temp_free_i32(tmp2);
9ee6e8bb 6401 }
dd8fbd78 6402 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6403 }
6404 break;
9ee6e8bb 6405 case 3: /* VQDMLAL scalar */
9ee6e8bb 6406 case 7: /* VQDMLSL scalar */
9ee6e8bb 6407 case 11: /* VQDMULL scalar */
3e3326df 6408 if (u == 1) {
ad69471c 6409 return 1;
3e3326df
PM
6410 }
6411 /* fall through */
6412 case 2: /* VMLAL sclar */
6413 case 6: /* VMLSL scalar */
6414 case 10: /* VMULL scalar */
6415 if (rd & 1) {
6416 return 1;
6417 }
dd8fbd78 6418 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6419 /* We need a copy of tmp2 because gen_neon_mull
6420 * deletes it during pass 0. */
7d1b0095 6421 tmp4 = tcg_temp_new_i32();
c6067f04 6422 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6423 tmp3 = neon_load_reg(rn, 1);
ad69471c 6424
9ee6e8bb 6425 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6426 if (pass == 0) {
6427 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6428 } else {
dd8fbd78 6429 tmp = tmp3;
c6067f04 6430 tmp2 = tmp4;
9ee6e8bb 6431 }
ad69471c 6432 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6433 if (op != 11) {
6434 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6435 }
9ee6e8bb 6436 switch (op) {
4dc064e6
PM
6437 case 6:
6438 gen_neon_negl(cpu_V0, size);
6439 /* Fall through */
6440 case 2:
ad69471c 6441 gen_neon_addl(size);
9ee6e8bb
PB
6442 break;
6443 case 3: case 7:
ad69471c 6444 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6445 if (op == 7) {
6446 gen_neon_negl(cpu_V0, size);
6447 }
ad69471c 6448 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6449 break;
6450 case 10:
6451 /* no-op */
6452 break;
6453 case 11:
ad69471c 6454 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6455 break;
6456 default:
6457 abort();
6458 }
ad69471c 6459 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6460 }
dd8fbd78 6461
dd8fbd78 6462
9ee6e8bb
PB
6463 break;
6464 default: /* 14 and 15 are RESERVED */
6465 return 1;
6466 }
6467 }
6468 } else { /* size == 3 */
6469 if (!u) {
6470 /* Extract. */
9ee6e8bb 6471 imm = (insn >> 8) & 0xf;
ad69471c
PB
6472
6473 if (imm > 7 && !q)
6474 return 1;
6475
52579ea1
PM
6476 if (q && ((rd | rn | rm) & 1)) {
6477 return 1;
6478 }
6479
ad69471c
PB
6480 if (imm == 0) {
6481 neon_load_reg64(cpu_V0, rn);
6482 if (q) {
6483 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6484 }
ad69471c
PB
6485 } else if (imm == 8) {
6486 neon_load_reg64(cpu_V0, rn + 1);
6487 if (q) {
6488 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6489 }
ad69471c 6490 } else if (q) {
a7812ae4 6491 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6492 if (imm < 8) {
6493 neon_load_reg64(cpu_V0, rn);
a7812ae4 6494 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6495 } else {
6496 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6497 neon_load_reg64(tmp64, rm);
ad69471c
PB
6498 }
6499 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6500 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6501 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6502 if (imm < 8) {
6503 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6504 } else {
ad69471c
PB
6505 neon_load_reg64(cpu_V1, rm + 1);
6506 imm -= 8;
9ee6e8bb 6507 }
ad69471c 6508 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6509 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6510 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6511 tcg_temp_free_i64(tmp64);
ad69471c 6512 } else {
a7812ae4 6513 /* BUGFIX */
ad69471c 6514 neon_load_reg64(cpu_V0, rn);
a7812ae4 6515 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6516 neon_load_reg64(cpu_V1, rm);
a7812ae4 6517 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6518 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6519 }
6520 neon_store_reg64(cpu_V0, rd);
6521 if (q) {
6522 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6523 }
6524 } else if ((insn & (1 << 11)) == 0) {
6525 /* Two register misc. */
6526 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6527 size = (insn >> 18) & 3;
600b828c
PM
6528 /* UNDEF for unknown op values and bad op-size combinations */
6529 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6530 return 1;
6531 }
fc2a9b37
PM
6532 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6533 q && ((rm | rd) & 1)) {
6534 return 1;
6535 }
9ee6e8bb 6536 switch (op) {
600b828c 6537 case NEON_2RM_VREV64:
9ee6e8bb 6538 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6539 tmp = neon_load_reg(rm, pass * 2);
6540 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6541 switch (size) {
dd8fbd78
FN
6542 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6543 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6544 case 2: /* no-op */ break;
6545 default: abort();
6546 }
dd8fbd78 6547 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6548 if (size == 2) {
dd8fbd78 6549 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6550 } else {
9ee6e8bb 6551 switch (size) {
dd8fbd78
FN
6552 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6553 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6554 default: abort();
6555 }
dd8fbd78 6556 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6557 }
6558 }
6559 break;
600b828c
PM
6560 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6561 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6562 for (pass = 0; pass < q + 1; pass++) {
6563 tmp = neon_load_reg(rm, pass * 2);
6564 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6565 tmp = neon_load_reg(rm, pass * 2 + 1);
6566 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6567 switch (size) {
6568 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6569 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6570 case 2: tcg_gen_add_i64(CPU_V001); break;
6571 default: abort();
6572 }
600b828c 6573 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6574 /* Accumulate. */
ad69471c
PB
6575 neon_load_reg64(cpu_V1, rd + pass);
6576 gen_neon_addl(size);
9ee6e8bb 6577 }
ad69471c 6578 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6579 }
6580 break;
600b828c 6581 case NEON_2RM_VTRN:
9ee6e8bb 6582 if (size == 2) {
a5a14945 6583 int n;
9ee6e8bb 6584 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6585 tmp = neon_load_reg(rm, n);
6586 tmp2 = neon_load_reg(rd, n + 1);
6587 neon_store_reg(rm, n, tmp2);
6588 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6589 }
6590 } else {
6591 goto elementwise;
6592 }
6593 break;
600b828c 6594 case NEON_2RM_VUZP:
02acedf9 6595 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6596 return 1;
9ee6e8bb
PB
6597 }
6598 break;
600b828c 6599 case NEON_2RM_VZIP:
d68a6f3a 6600 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6601 return 1;
9ee6e8bb
PB
6602 }
6603 break;
600b828c
PM
6604 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6605 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6606 if (rm & 1) {
6607 return 1;
6608 }
39d5492a 6609 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6610 for (pass = 0; pass < 2; pass++) {
ad69471c 6611 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6612 tmp = tcg_temp_new_i32();
600b828c
PM
6613 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6614 tmp, cpu_V0);
ad69471c
PB
6615 if (pass == 0) {
6616 tmp2 = tmp;
6617 } else {
6618 neon_store_reg(rd, 0, tmp2);
6619 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6620 }
9ee6e8bb
PB
6621 }
6622 break;
600b828c 6623 case NEON_2RM_VSHLL:
fc2a9b37 6624 if (q || (rd & 1)) {
9ee6e8bb 6625 return 1;
600b828c 6626 }
ad69471c
PB
6627 tmp = neon_load_reg(rm, 0);
6628 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6629 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6630 if (pass == 1)
6631 tmp = tmp2;
6632 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6633 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6634 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6635 }
6636 break;
600b828c 6637 case NEON_2RM_VCVT_F16_F32:
d614a513 6638 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6639 q || (rm & 1)) {
6640 return 1;
6641 }
7d1b0095
PM
6642 tmp = tcg_temp_new_i32();
6643 tmp2 = tcg_temp_new_i32();
60011498 6644 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6645 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6646 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6647 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6648 tcg_gen_shli_i32(tmp2, tmp2, 16);
6649 tcg_gen_or_i32(tmp2, tmp2, tmp);
6650 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6651 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6652 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6653 neon_store_reg(rd, 0, tmp2);
7d1b0095 6654 tmp2 = tcg_temp_new_i32();
2d981da7 6655 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6656 tcg_gen_shli_i32(tmp2, tmp2, 16);
6657 tcg_gen_or_i32(tmp2, tmp2, tmp);
6658 neon_store_reg(rd, 1, tmp2);
7d1b0095 6659 tcg_temp_free_i32(tmp);
60011498 6660 break;
600b828c 6661 case NEON_2RM_VCVT_F32_F16:
d614a513 6662 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6663 q || (rd & 1)) {
6664 return 1;
6665 }
7d1b0095 6666 tmp3 = tcg_temp_new_i32();
60011498
PB
6667 tmp = neon_load_reg(rm, 0);
6668 tmp2 = neon_load_reg(rm, 1);
6669 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6670 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6671 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6672 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6673 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6674 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6675 tcg_temp_free_i32(tmp);
60011498 6676 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6677 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6678 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6679 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6680 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6681 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6682 tcg_temp_free_i32(tmp2);
6683 tcg_temp_free_i32(tmp3);
60011498 6684 break;
9d935509 6685 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6686 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6687 || ((rm | rd) & 1)) {
6688 return 1;
6689 }
6690 tmp = tcg_const_i32(rd);
6691 tmp2 = tcg_const_i32(rm);
6692
6693 /* Bit 6 is the lowest opcode bit; it distinguishes between
6694 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6695 */
6696 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6697
6698 if (op == NEON_2RM_AESE) {
6699 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6700 } else {
6701 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6702 }
6703 tcg_temp_free_i32(tmp);
6704 tcg_temp_free_i32(tmp2);
6705 tcg_temp_free_i32(tmp3);
6706 break;
f1ecb913 6707 case NEON_2RM_SHA1H:
d614a513 6708 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
6709 || ((rm | rd) & 1)) {
6710 return 1;
6711 }
6712 tmp = tcg_const_i32(rd);
6713 tmp2 = tcg_const_i32(rm);
6714
6715 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6716
6717 tcg_temp_free_i32(tmp);
6718 tcg_temp_free_i32(tmp2);
6719 break;
6720 case NEON_2RM_SHA1SU1:
6721 if ((rm | rd) & 1) {
6722 return 1;
6723 }
6724 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6725 if (q) {
d614a513 6726 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
6727 return 1;
6728 }
d614a513 6729 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
6730 return 1;
6731 }
6732 tmp = tcg_const_i32(rd);
6733 tmp2 = tcg_const_i32(rm);
6734 if (q) {
6735 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6736 } else {
6737 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6738 }
6739 tcg_temp_free_i32(tmp);
6740 tcg_temp_free_i32(tmp2);
6741 break;
9ee6e8bb
PB
6742 default:
6743 elementwise:
6744 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6745 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6746 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6747 neon_reg_offset(rm, pass));
39d5492a 6748 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6749 } else {
dd8fbd78 6750 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6751 }
6752 switch (op) {
600b828c 6753 case NEON_2RM_VREV32:
9ee6e8bb 6754 switch (size) {
dd8fbd78
FN
6755 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6756 case 1: gen_swap_half(tmp); break;
600b828c 6757 default: abort();
9ee6e8bb
PB
6758 }
6759 break;
600b828c 6760 case NEON_2RM_VREV16:
dd8fbd78 6761 gen_rev16(tmp);
9ee6e8bb 6762 break;
600b828c 6763 case NEON_2RM_VCLS:
9ee6e8bb 6764 switch (size) {
dd8fbd78
FN
6765 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6766 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6767 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6768 default: abort();
9ee6e8bb
PB
6769 }
6770 break;
600b828c 6771 case NEON_2RM_VCLZ:
9ee6e8bb 6772 switch (size) {
dd8fbd78
FN
6773 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6774 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6775 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6776 default: abort();
9ee6e8bb
PB
6777 }
6778 break;
600b828c 6779 case NEON_2RM_VCNT:
dd8fbd78 6780 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6781 break;
600b828c 6782 case NEON_2RM_VMVN:
dd8fbd78 6783 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6784 break;
600b828c 6785 case NEON_2RM_VQABS:
9ee6e8bb 6786 switch (size) {
02da0b2d
PM
6787 case 0:
6788 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6789 break;
6790 case 1:
6791 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6792 break;
6793 case 2:
6794 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6795 break;
600b828c 6796 default: abort();
9ee6e8bb
PB
6797 }
6798 break;
600b828c 6799 case NEON_2RM_VQNEG:
9ee6e8bb 6800 switch (size) {
02da0b2d
PM
6801 case 0:
6802 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6803 break;
6804 case 1:
6805 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6806 break;
6807 case 2:
6808 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6809 break;
600b828c 6810 default: abort();
9ee6e8bb
PB
6811 }
6812 break;
600b828c 6813 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6814 tmp2 = tcg_const_i32(0);
9ee6e8bb 6815 switch(size) {
dd8fbd78
FN
6816 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6817 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6818 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6819 default: abort();
9ee6e8bb 6820 }
39d5492a 6821 tcg_temp_free_i32(tmp2);
600b828c 6822 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6823 tcg_gen_not_i32(tmp, tmp);
600b828c 6824 }
9ee6e8bb 6825 break;
600b828c 6826 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6827 tmp2 = tcg_const_i32(0);
9ee6e8bb 6828 switch(size) {
dd8fbd78
FN
6829 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6830 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6831 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6832 default: abort();
9ee6e8bb 6833 }
39d5492a 6834 tcg_temp_free_i32(tmp2);
600b828c 6835 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6836 tcg_gen_not_i32(tmp, tmp);
600b828c 6837 }
9ee6e8bb 6838 break;
600b828c 6839 case NEON_2RM_VCEQ0:
dd8fbd78 6840 tmp2 = tcg_const_i32(0);
9ee6e8bb 6841 switch(size) {
dd8fbd78
FN
6842 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6843 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6844 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6845 default: abort();
9ee6e8bb 6846 }
39d5492a 6847 tcg_temp_free_i32(tmp2);
9ee6e8bb 6848 break;
600b828c 6849 case NEON_2RM_VABS:
9ee6e8bb 6850 switch(size) {
dd8fbd78
FN
6851 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6852 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6853 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6854 default: abort();
9ee6e8bb
PB
6855 }
6856 break;
600b828c 6857 case NEON_2RM_VNEG:
dd8fbd78
FN
6858 tmp2 = tcg_const_i32(0);
6859 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6860 tcg_temp_free_i32(tmp2);
9ee6e8bb 6861 break;
600b828c 6862 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6863 {
6864 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6865 tmp2 = tcg_const_i32(0);
aa47cfdd 6866 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6867 tcg_temp_free_i32(tmp2);
aa47cfdd 6868 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6869 break;
aa47cfdd 6870 }
600b828c 6871 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6872 {
6873 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6874 tmp2 = tcg_const_i32(0);
aa47cfdd 6875 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6876 tcg_temp_free_i32(tmp2);
aa47cfdd 6877 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6878 break;
aa47cfdd 6879 }
600b828c 6880 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6881 {
6882 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6883 tmp2 = tcg_const_i32(0);
aa47cfdd 6884 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6885 tcg_temp_free_i32(tmp2);
aa47cfdd 6886 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6887 break;
aa47cfdd 6888 }
600b828c 6889 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6890 {
6891 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6892 tmp2 = tcg_const_i32(0);
aa47cfdd 6893 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6894 tcg_temp_free_i32(tmp2);
aa47cfdd 6895 tcg_temp_free_ptr(fpstatus);
0e326109 6896 break;
aa47cfdd 6897 }
600b828c 6898 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6899 {
6900 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6901 tmp2 = tcg_const_i32(0);
aa47cfdd 6902 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6903 tcg_temp_free_i32(tmp2);
aa47cfdd 6904 tcg_temp_free_ptr(fpstatus);
0e326109 6905 break;
aa47cfdd 6906 }
600b828c 6907 case NEON_2RM_VABS_F:
4373f3ce 6908 gen_vfp_abs(0);
9ee6e8bb 6909 break;
600b828c 6910 case NEON_2RM_VNEG_F:
4373f3ce 6911 gen_vfp_neg(0);
9ee6e8bb 6912 break;
600b828c 6913 case NEON_2RM_VSWP:
dd8fbd78
FN
6914 tmp2 = neon_load_reg(rd, pass);
6915 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6916 break;
600b828c 6917 case NEON_2RM_VTRN:
dd8fbd78 6918 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6919 switch (size) {
dd8fbd78
FN
6920 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6921 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6922 default: abort();
9ee6e8bb 6923 }
dd8fbd78 6924 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6925 break;
34f7b0a2
WN
6926 case NEON_2RM_VRINTN:
6927 case NEON_2RM_VRINTA:
6928 case NEON_2RM_VRINTM:
6929 case NEON_2RM_VRINTP:
6930 case NEON_2RM_VRINTZ:
6931 {
6932 TCGv_i32 tcg_rmode;
6933 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6934 int rmode;
6935
6936 if (op == NEON_2RM_VRINTZ) {
6937 rmode = FPROUNDING_ZERO;
6938 } else {
6939 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6940 }
6941
6942 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6943 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6944 cpu_env);
6945 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6946 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6947 cpu_env);
6948 tcg_temp_free_ptr(fpstatus);
6949 tcg_temp_free_i32(tcg_rmode);
6950 break;
6951 }
2ce70625
WN
6952 case NEON_2RM_VRINTX:
6953 {
6954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6955 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6956 tcg_temp_free_ptr(fpstatus);
6957 break;
6958 }
901ad525
WN
6959 case NEON_2RM_VCVTAU:
6960 case NEON_2RM_VCVTAS:
6961 case NEON_2RM_VCVTNU:
6962 case NEON_2RM_VCVTNS:
6963 case NEON_2RM_VCVTPU:
6964 case NEON_2RM_VCVTPS:
6965 case NEON_2RM_VCVTMU:
6966 case NEON_2RM_VCVTMS:
6967 {
6968 bool is_signed = !extract32(insn, 7, 1);
6969 TCGv_ptr fpst = get_fpstatus_ptr(1);
6970 TCGv_i32 tcg_rmode, tcg_shift;
6971 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6972
6973 tcg_shift = tcg_const_i32(0);
6974 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6975 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6976 cpu_env);
6977
6978 if (is_signed) {
6979 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6980 tcg_shift, fpst);
6981 } else {
6982 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6983 tcg_shift, fpst);
6984 }
6985
6986 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6987 cpu_env);
6988 tcg_temp_free_i32(tcg_rmode);
6989 tcg_temp_free_i32(tcg_shift);
6990 tcg_temp_free_ptr(fpst);
6991 break;
6992 }
600b828c 6993 case NEON_2RM_VRECPE:
b6d4443a
AB
6994 {
6995 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6996 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6997 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6998 break;
b6d4443a 6999 }
600b828c 7000 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7001 {
7002 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7003 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7004 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7005 break;
c2fb418e 7006 }
600b828c 7007 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7008 {
7009 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7010 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7011 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7012 break;
b6d4443a 7013 }
600b828c 7014 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7015 {
7016 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7017 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7018 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7019 break;
c2fb418e 7020 }
600b828c 7021 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7022 gen_vfp_sito(0, 1);
9ee6e8bb 7023 break;
600b828c 7024 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7025 gen_vfp_uito(0, 1);
9ee6e8bb 7026 break;
600b828c 7027 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7028 gen_vfp_tosiz(0, 1);
9ee6e8bb 7029 break;
600b828c 7030 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7031 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7032 break;
7033 default:
600b828c
PM
7034 /* Reserved op values were caught by the
7035 * neon_2rm_sizes[] check earlier.
7036 */
7037 abort();
9ee6e8bb 7038 }
600b828c 7039 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7040 tcg_gen_st_f32(cpu_F0s, cpu_env,
7041 neon_reg_offset(rd, pass));
9ee6e8bb 7042 } else {
dd8fbd78 7043 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7044 }
7045 }
7046 break;
7047 }
7048 } else if ((insn & (1 << 10)) == 0) {
7049 /* VTBL, VTBX. */
56907d77
PM
7050 int n = ((insn >> 8) & 3) + 1;
7051 if ((rn + n) > 32) {
7052 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7053 * helper function running off the end of the register file.
7054 */
7055 return 1;
7056 }
7057 n <<= 3;
9ee6e8bb 7058 if (insn & (1 << 6)) {
8f8e3aa4 7059 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7060 } else {
7d1b0095 7061 tmp = tcg_temp_new_i32();
8f8e3aa4 7062 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7063 }
8f8e3aa4 7064 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7065 tmp4 = tcg_const_i32(rn);
7066 tmp5 = tcg_const_i32(n);
9ef39277 7067 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7068 tcg_temp_free_i32(tmp);
9ee6e8bb 7069 if (insn & (1 << 6)) {
8f8e3aa4 7070 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7071 } else {
7d1b0095 7072 tmp = tcg_temp_new_i32();
8f8e3aa4 7073 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7074 }
8f8e3aa4 7075 tmp3 = neon_load_reg(rm, 1);
9ef39277 7076 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7077 tcg_temp_free_i32(tmp5);
7078 tcg_temp_free_i32(tmp4);
8f8e3aa4 7079 neon_store_reg(rd, 0, tmp2);
3018f259 7080 neon_store_reg(rd, 1, tmp3);
7d1b0095 7081 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7082 } else if ((insn & 0x380) == 0) {
7083 /* VDUP */
133da6aa
JR
7084 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7085 return 1;
7086 }
9ee6e8bb 7087 if (insn & (1 << 19)) {
dd8fbd78 7088 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7089 } else {
dd8fbd78 7090 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7091 }
7092 if (insn & (1 << 16)) {
dd8fbd78 7093 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7094 } else if (insn & (1 << 17)) {
7095 if ((insn >> 18) & 1)
dd8fbd78 7096 gen_neon_dup_high16(tmp);
9ee6e8bb 7097 else
dd8fbd78 7098 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7099 }
7100 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7101 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7102 tcg_gen_mov_i32(tmp2, tmp);
7103 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7104 }
7d1b0095 7105 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7106 } else {
7107 return 1;
7108 }
7109 }
7110 }
7111 return 0;
7112}
7113
7dcc1f89 7114static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7115{
4b6a83fb
PM
7116 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7117 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7118
7119 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7120
7121 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7122 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7123 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7124 return 1;
7125 }
d614a513 7126 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7127 return disas_iwmmxt_insn(s, insn);
d614a513 7128 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7129 return disas_dsp_insn(s, insn);
c0f4af17
PM
7130 }
7131 return 1;
4b6a83fb
PM
7132 }
7133
7134 /* Otherwise treat as a generic register access */
7135 is64 = (insn & (1 << 25)) == 0;
7136 if (!is64 && ((insn & (1 << 4)) == 0)) {
7137 /* cdp */
7138 return 1;
7139 }
7140
7141 crm = insn & 0xf;
7142 if (is64) {
7143 crn = 0;
7144 opc1 = (insn >> 4) & 0xf;
7145 opc2 = 0;
7146 rt2 = (insn >> 16) & 0xf;
7147 } else {
7148 crn = (insn >> 16) & 0xf;
7149 opc1 = (insn >> 21) & 7;
7150 opc2 = (insn >> 5) & 7;
7151 rt2 = 0;
7152 }
7153 isread = (insn >> 20) & 1;
7154 rt = (insn >> 12) & 0xf;
7155
60322b39 7156 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7157 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7158 if (ri) {
7159 /* Check access permissions */
dcbff19b 7160 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7161 return 1;
7162 }
7163
c0f4af17 7164 if (ri->accessfn ||
d614a513 7165 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7166 /* Emit code to perform further access permissions checks at
7167 * runtime; this may result in an exception.
c0f4af17
PM
7168 * Note that on XScale all cp0..c13 registers do an access check
7169 * call in order to handle c15_cpar.
f59df3f2
PM
7170 */
7171 TCGv_ptr tmpptr;
3f208fd7 7172 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7173 uint32_t syndrome;
7174
7175 /* Note that since we are an implementation which takes an
7176 * exception on a trapped conditional instruction only if the
7177 * instruction passes its condition code check, we can take
7178 * advantage of the clause in the ARM ARM that allows us to set
7179 * the COND field in the instruction to 0xE in all cases.
7180 * We could fish the actual condition out of the insn (ARM)
7181 * or the condexec bits (Thumb) but it isn't necessary.
7182 */
7183 switch (cpnum) {
7184 case 14:
7185 if (is64) {
7186 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7187 isread, s->thumb);
7188 } else {
7189 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7190 rt, isread, s->thumb);
7191 }
7192 break;
7193 case 15:
7194 if (is64) {
7195 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
7196 isread, s->thumb);
7197 } else {
7198 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
7199 rt, isread, s->thumb);
7200 }
7201 break;
7202 default:
7203 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7204 * so this can only happen if this is an ARMv7 or earlier CPU,
7205 * in which case the syndrome information won't actually be
7206 * guest visible.
7207 */
d614a513 7208 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7209 syndrome = syn_uncategorized();
7210 break;
7211 }
7212
43bfa4a1 7213 gen_set_condexec(s);
3977ee5d 7214 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7215 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7216 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7217 tcg_isread = tcg_const_i32(isread);
7218 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7219 tcg_isread);
f59df3f2 7220 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7221 tcg_temp_free_i32(tcg_syn);
3f208fd7 7222 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7223 }
7224
4b6a83fb
PM
7225 /* Handle special cases first */
7226 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7227 case ARM_CP_NOP:
7228 return 0;
7229 case ARM_CP_WFI:
7230 if (isread) {
7231 return 1;
7232 }
eaed129d 7233 gen_set_pc_im(s, s->pc);
4b6a83fb 7234 s->is_jmp = DISAS_WFI;
2bee5105 7235 return 0;
4b6a83fb
PM
7236 default:
7237 break;
7238 }
7239
bd79255d 7240 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7241 gen_io_start();
7242 }
7243
4b6a83fb
PM
7244 if (isread) {
7245 /* Read */
7246 if (is64) {
7247 TCGv_i64 tmp64;
7248 TCGv_i32 tmp;
7249 if (ri->type & ARM_CP_CONST) {
7250 tmp64 = tcg_const_i64(ri->resetvalue);
7251 } else if (ri->readfn) {
7252 TCGv_ptr tmpptr;
4b6a83fb
PM
7253 tmp64 = tcg_temp_new_i64();
7254 tmpptr = tcg_const_ptr(ri);
7255 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7256 tcg_temp_free_ptr(tmpptr);
7257 } else {
7258 tmp64 = tcg_temp_new_i64();
7259 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7260 }
7261 tmp = tcg_temp_new_i32();
ecc7b3aa 7262 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7263 store_reg(s, rt, tmp);
7264 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7265 tmp = tcg_temp_new_i32();
ecc7b3aa 7266 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7267 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7268 store_reg(s, rt2, tmp);
7269 } else {
39d5492a 7270 TCGv_i32 tmp;
4b6a83fb
PM
7271 if (ri->type & ARM_CP_CONST) {
7272 tmp = tcg_const_i32(ri->resetvalue);
7273 } else if (ri->readfn) {
7274 TCGv_ptr tmpptr;
4b6a83fb
PM
7275 tmp = tcg_temp_new_i32();
7276 tmpptr = tcg_const_ptr(ri);
7277 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7278 tcg_temp_free_ptr(tmpptr);
7279 } else {
7280 tmp = load_cpu_offset(ri->fieldoffset);
7281 }
7282 if (rt == 15) {
7283 /* Destination register of r15 for 32 bit loads sets
7284 * the condition codes from the high 4 bits of the value
7285 */
7286 gen_set_nzcv(tmp);
7287 tcg_temp_free_i32(tmp);
7288 } else {
7289 store_reg(s, rt, tmp);
7290 }
7291 }
7292 } else {
7293 /* Write */
7294 if (ri->type & ARM_CP_CONST) {
7295 /* If not forbidden by access permissions, treat as WI */
7296 return 0;
7297 }
7298
7299 if (is64) {
39d5492a 7300 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7301 TCGv_i64 tmp64 = tcg_temp_new_i64();
7302 tmplo = load_reg(s, rt);
7303 tmphi = load_reg(s, rt2);
7304 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7305 tcg_temp_free_i32(tmplo);
7306 tcg_temp_free_i32(tmphi);
7307 if (ri->writefn) {
7308 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7309 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7310 tcg_temp_free_ptr(tmpptr);
7311 } else {
7312 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7313 }
7314 tcg_temp_free_i64(tmp64);
7315 } else {
7316 if (ri->writefn) {
39d5492a 7317 TCGv_i32 tmp;
4b6a83fb 7318 TCGv_ptr tmpptr;
4b6a83fb
PM
7319 tmp = load_reg(s, rt);
7320 tmpptr = tcg_const_ptr(ri);
7321 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7322 tcg_temp_free_ptr(tmpptr);
7323 tcg_temp_free_i32(tmp);
7324 } else {
39d5492a 7325 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7326 store_cpu_offset(tmp, ri->fieldoffset);
7327 }
7328 }
2452731c
PM
7329 }
7330
bd79255d 7331 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7332 /* I/O operations must end the TB here (whether read or write) */
7333 gen_io_end();
7334 gen_lookup_tb(s);
7335 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7336 /* We default to ending the TB on a coprocessor register write,
7337 * but allow this to be suppressed by the register definition
7338 * (usually only necessary to work around guest bugs).
7339 */
2452731c 7340 gen_lookup_tb(s);
4b6a83fb 7341 }
2452731c 7342
4b6a83fb
PM
7343 return 0;
7344 }
7345
626187d8
PM
7346 /* Unknown register; this might be a guest error or a QEMU
7347 * unimplemented feature.
7348 */
7349 if (is64) {
7350 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7351 "64 bit system register cp:%d opc1: %d crm:%d "
7352 "(%s)\n",
7353 isread ? "read" : "write", cpnum, opc1, crm,
7354 s->ns ? "non-secure" : "secure");
626187d8
PM
7355 } else {
7356 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7357 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7358 "(%s)\n",
7359 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7360 s->ns ? "non-secure" : "secure");
626187d8
PM
7361 }
7362
4a9a539f 7363 return 1;
9ee6e8bb
PB
7364}
7365
5e3f878a
PB
7366
7367/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7368static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7369{
39d5492a 7370 TCGv_i32 tmp;
7d1b0095 7371 tmp = tcg_temp_new_i32();
ecc7b3aa 7372 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7373 store_reg(s, rlow, tmp);
7d1b0095 7374 tmp = tcg_temp_new_i32();
5e3f878a 7375 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7376 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7377 store_reg(s, rhigh, tmp);
7378}
7379
7380/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7381static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7382{
a7812ae4 7383 TCGv_i64 tmp;
39d5492a 7384 TCGv_i32 tmp2;
5e3f878a 7385
36aa55dc 7386 /* Load value and extend to 64 bits. */
a7812ae4 7387 tmp = tcg_temp_new_i64();
5e3f878a
PB
7388 tmp2 = load_reg(s, rlow);
7389 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7390 tcg_temp_free_i32(tmp2);
5e3f878a 7391 tcg_gen_add_i64(val, val, tmp);
b75263d6 7392 tcg_temp_free_i64(tmp);
5e3f878a
PB
7393}
7394
7395/* load and add a 64-bit value from a register pair. */
a7812ae4 7396static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7397{
a7812ae4 7398 TCGv_i64 tmp;
39d5492a
PM
7399 TCGv_i32 tmpl;
7400 TCGv_i32 tmph;
5e3f878a
PB
7401
7402 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7403 tmpl = load_reg(s, rlow);
7404 tmph = load_reg(s, rhigh);
a7812ae4 7405 tmp = tcg_temp_new_i64();
36aa55dc 7406 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7407 tcg_temp_free_i32(tmpl);
7408 tcg_temp_free_i32(tmph);
5e3f878a 7409 tcg_gen_add_i64(val, val, tmp);
b75263d6 7410 tcg_temp_free_i64(tmp);
5e3f878a
PB
7411}
7412
c9f10124 7413/* Set N and Z flags from hi|lo. */
39d5492a 7414static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7415{
c9f10124
RH
7416 tcg_gen_mov_i32(cpu_NF, hi);
7417 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7418}
7419
426f5abc
PB
7420/* Load/Store exclusive instructions are implemented by remembering
7421 the value/address loaded, and seeing if these are the same
b90372ad 7422 when the store is performed. This should be sufficient to implement
426f5abc
PB
7423 the architecturally mandated semantics, and avoids having to monitor
7424 regular stores.
7425
7426 In system emulation mode only one CPU will be running at once, so
7427 this sequence is effectively atomic. In user emulation mode we
7428 throw an exception and handle the atomic operation elsewhere. */
7429static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7430 TCGv_i32 addr, int size)
426f5abc 7431{
94ee24e7 7432 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc 7433
50225ad0
PM
7434 s->is_ldex = true;
7435
426f5abc
PB
7436 switch (size) {
7437 case 0:
6ce2faf4 7438 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
426f5abc
PB
7439 break;
7440 case 1:
30901475 7441 gen_aa32_ld16ua(tmp, addr, get_mem_index(s));
426f5abc
PB
7442 break;
7443 case 2:
7444 case 3:
30901475 7445 gen_aa32_ld32ua(tmp, addr, get_mem_index(s));
426f5abc
PB
7446 break;
7447 default:
7448 abort();
7449 }
03d05e2d 7450
426f5abc 7451 if (size == 3) {
39d5492a 7452 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d
PM
7453 TCGv_i32 tmp3 = tcg_temp_new_i32();
7454
2c9adbda 7455 tcg_gen_addi_i32(tmp2, addr, 4);
6ce2faf4 7456 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7d1b0095 7457 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7458 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7459 store_reg(s, rt2, tmp3);
7460 } else {
7461 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7462 }
03d05e2d
PM
7463
7464 store_reg(s, rt, tmp);
7465 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7466}
7467
7468static void gen_clrex(DisasContext *s)
7469{
03d05e2d 7470 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7471}
7472
7473#ifdef CONFIG_USER_ONLY
7474static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7475 TCGv_i32 addr, int size)
426f5abc 7476{
03d05e2d 7477 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
426f5abc
PB
7478 tcg_gen_movi_i32(cpu_exclusive_info,
7479 size | (rd << 4) | (rt << 8) | (rt2 << 12));
d4a2dc67 7480 gen_exception_internal_insn(s, 4, EXCP_STREX);
426f5abc
PB
7481}
7482#else
7483static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7484 TCGv_i32 addr, int size)
426f5abc 7485{
39d5492a 7486 TCGv_i32 tmp;
03d05e2d 7487 TCGv_i64 val64, extaddr;
42a268c2
RH
7488 TCGLabel *done_label;
7489 TCGLabel *fail_label;
426f5abc
PB
7490
7491 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7492 [addr] = {Rt};
7493 {Rd} = 0;
7494 } else {
7495 {Rd} = 1;
7496 } */
7497 fail_label = gen_new_label();
7498 done_label = gen_new_label();
03d05e2d
PM
7499 extaddr = tcg_temp_new_i64();
7500 tcg_gen_extu_i32_i64(extaddr, addr);
7501 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7502 tcg_temp_free_i64(extaddr);
7503
94ee24e7 7504 tmp = tcg_temp_new_i32();
426f5abc
PB
7505 switch (size) {
7506 case 0:
6ce2faf4 7507 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
426f5abc
PB
7508 break;
7509 case 1:
6ce2faf4 7510 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
426f5abc
PB
7511 break;
7512 case 2:
7513 case 3:
6ce2faf4 7514 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
426f5abc
PB
7515 break;
7516 default:
7517 abort();
7518 }
03d05e2d
PM
7519
7520 val64 = tcg_temp_new_i64();
426f5abc 7521 if (size == 3) {
39d5492a 7522 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d 7523 TCGv_i32 tmp3 = tcg_temp_new_i32();
426f5abc 7524 tcg_gen_addi_i32(tmp2, addr, 4);
6ce2faf4 7525 gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
7d1b0095 7526 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7527 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7528 tcg_temp_free_i32(tmp3);
7529 } else {
7530 tcg_gen_extu_i32_i64(val64, tmp);
426f5abc 7531 }
03d05e2d
PM
7532 tcg_temp_free_i32(tmp);
7533
7534 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7535 tcg_temp_free_i64(val64);
7536
426f5abc
PB
7537 tmp = load_reg(s, rt);
7538 switch (size) {
7539 case 0:
6ce2faf4 7540 gen_aa32_st8(tmp, addr, get_mem_index(s));
426f5abc
PB
7541 break;
7542 case 1:
6ce2faf4 7543 gen_aa32_st16(tmp, addr, get_mem_index(s));
426f5abc
PB
7544 break;
7545 case 2:
7546 case 3:
6ce2faf4 7547 gen_aa32_st32(tmp, addr, get_mem_index(s));
426f5abc
PB
7548 break;
7549 default:
7550 abort();
7551 }
94ee24e7 7552 tcg_temp_free_i32(tmp);
426f5abc
PB
7553 if (size == 3) {
7554 tcg_gen_addi_i32(addr, addr, 4);
7555 tmp = load_reg(s, rt2);
6ce2faf4 7556 gen_aa32_st32(tmp, addr, get_mem_index(s));
94ee24e7 7557 tcg_temp_free_i32(tmp);
426f5abc
PB
7558 }
7559 tcg_gen_movi_i32(cpu_R[rd], 0);
7560 tcg_gen_br(done_label);
7561 gen_set_label(fail_label);
7562 tcg_gen_movi_i32(cpu_R[rd], 1);
7563 gen_set_label(done_label);
03d05e2d 7564 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7565}
7566#endif
7567
81465888
PM
7568/* gen_srs:
7569 * @env: CPUARMState
7570 * @s: DisasContext
7571 * @mode: mode field from insn (which stack to store to)
7572 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7573 * @writeback: true if writeback bit set
7574 *
7575 * Generate code for the SRS (Store Return State) insn.
7576 */
7577static void gen_srs(DisasContext *s,
7578 uint32_t mode, uint32_t amode, bool writeback)
7579{
7580 int32_t offset;
7581 TCGv_i32 addr = tcg_temp_new_i32();
7582 TCGv_i32 tmp = tcg_const_i32(mode);
7583 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7584 tcg_temp_free_i32(tmp);
7585 switch (amode) {
7586 case 0: /* DA */
7587 offset = -4;
7588 break;
7589 case 1: /* IA */
7590 offset = 0;
7591 break;
7592 case 2: /* DB */
7593 offset = -8;
7594 break;
7595 case 3: /* IB */
7596 offset = 4;
7597 break;
7598 default:
7599 abort();
7600 }
7601 tcg_gen_addi_i32(addr, addr, offset);
7602 tmp = load_reg(s, 14);
c1197795 7603 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7604 tcg_temp_free_i32(tmp);
81465888
PM
7605 tmp = load_cpu_field(spsr);
7606 tcg_gen_addi_i32(addr, addr, 4);
c1197795 7607 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7608 tcg_temp_free_i32(tmp);
81465888
PM
7609 if (writeback) {
7610 switch (amode) {
7611 case 0:
7612 offset = -8;
7613 break;
7614 case 1:
7615 offset = 4;
7616 break;
7617 case 2:
7618 offset = -4;
7619 break;
7620 case 3:
7621 offset = 0;
7622 break;
7623 default:
7624 abort();
7625 }
7626 tcg_gen_addi_i32(addr, addr, offset);
7627 tmp = tcg_const_i32(mode);
7628 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7629 tcg_temp_free_i32(tmp);
7630 }
7631 tcg_temp_free_i32(addr);
7632}
7633
f4df2210 7634static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7635{
f4df2210 7636 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7637 TCGv_i32 tmp;
7638 TCGv_i32 tmp2;
7639 TCGv_i32 tmp3;
7640 TCGv_i32 addr;
a7812ae4 7641 TCGv_i64 tmp64;
9ee6e8bb 7642
9ee6e8bb 7643 /* M variants do not implement ARM mode. */
b53d8923 7644 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 7645 goto illegal_op;
b53d8923 7646 }
9ee6e8bb
PB
7647 cond = insn >> 28;
7648 if (cond == 0xf){
be5e7a76
DES
7649 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7650 * choose to UNDEF. In ARMv5 and above the space is used
7651 * for miscellaneous unconditional instructions.
7652 */
7653 ARCH(5);
7654
9ee6e8bb
PB
7655 /* Unconditional instructions. */
7656 if (((insn >> 25) & 7) == 1) {
7657 /* NEON Data processing. */
d614a513 7658 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7659 goto illegal_op;
d614a513 7660 }
9ee6e8bb 7661
7dcc1f89 7662 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7663 goto illegal_op;
7dcc1f89 7664 }
9ee6e8bb
PB
7665 return;
7666 }
7667 if ((insn & 0x0f100000) == 0x04000000) {
7668 /* NEON load/store. */
d614a513 7669 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7670 goto illegal_op;
d614a513 7671 }
9ee6e8bb 7672
7dcc1f89 7673 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 7674 goto illegal_op;
7dcc1f89 7675 }
9ee6e8bb
PB
7676 return;
7677 }
6a57f3eb
WN
7678 if ((insn & 0x0f000e10) == 0x0e000a00) {
7679 /* VFP. */
7dcc1f89 7680 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
7681 goto illegal_op;
7682 }
7683 return;
7684 }
3d185e5d
PM
7685 if (((insn & 0x0f30f000) == 0x0510f000) ||
7686 ((insn & 0x0f30f010) == 0x0710f000)) {
7687 if ((insn & (1 << 22)) == 0) {
7688 /* PLDW; v7MP */
d614a513 7689 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7690 goto illegal_op;
7691 }
7692 }
7693 /* Otherwise PLD; v5TE+ */
be5e7a76 7694 ARCH(5TE);
3d185e5d
PM
7695 return;
7696 }
7697 if (((insn & 0x0f70f000) == 0x0450f000) ||
7698 ((insn & 0x0f70f010) == 0x0650f000)) {
7699 ARCH(7);
7700 return; /* PLI; V7 */
7701 }
7702 if (((insn & 0x0f700000) == 0x04100000) ||
7703 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 7704 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7705 goto illegal_op;
7706 }
7707 return; /* v7MP: Unallocated memory hint: must NOP */
7708 }
7709
7710 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7711 ARCH(6);
7712 /* setend */
10962fd5
PM
7713 if (((insn >> 9) & 1) != s->bswap_code) {
7714 /* Dynamic endianness switching not implemented. */
e0c270d9 7715 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7716 goto illegal_op;
7717 }
7718 return;
7719 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7720 switch ((insn >> 4) & 0xf) {
7721 case 1: /* clrex */
7722 ARCH(6K);
426f5abc 7723 gen_clrex(s);
9ee6e8bb
PB
7724 return;
7725 case 4: /* dsb */
7726 case 5: /* dmb */
9ee6e8bb
PB
7727 ARCH(7);
7728 /* We don't emulate caches so these are a no-op. */
7729 return;
6df99dec
SS
7730 case 6: /* isb */
7731 /* We need to break the TB after this insn to execute
7732 * self-modifying code correctly and also to take
7733 * any pending interrupts immediately.
7734 */
7735 gen_lookup_tb(s);
7736 return;
9ee6e8bb
PB
7737 default:
7738 goto illegal_op;
7739 }
7740 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7741 /* srs */
81465888 7742 if (IS_USER(s)) {
9ee6e8bb 7743 goto illegal_op;
9ee6e8bb 7744 }
81465888
PM
7745 ARCH(6);
7746 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7747 return;
ea825eee 7748 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7749 /* rfe */
c67b6b71 7750 int32_t offset;
9ee6e8bb
PB
7751 if (IS_USER(s))
7752 goto illegal_op;
7753 ARCH(6);
7754 rn = (insn >> 16) & 0xf;
b0109805 7755 addr = load_reg(s, rn);
9ee6e8bb
PB
7756 i = (insn >> 23) & 3;
7757 switch (i) {
b0109805 7758 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7759 case 1: offset = 0; break; /* IA */
7760 case 2: offset = -8; break; /* DB */
b0109805 7761 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7762 default: abort();
7763 }
7764 if (offset)
b0109805
PB
7765 tcg_gen_addi_i32(addr, addr, offset);
7766 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7767 tmp = tcg_temp_new_i32();
6ce2faf4 7768 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7769 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7770 tmp2 = tcg_temp_new_i32();
6ce2faf4 7771 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7772 if (insn & (1 << 21)) {
7773 /* Base writeback. */
7774 switch (i) {
b0109805 7775 case 0: offset = -8; break;
c67b6b71
FN
7776 case 1: offset = 4; break;
7777 case 2: offset = -4; break;
b0109805 7778 case 3: offset = 0; break;
9ee6e8bb
PB
7779 default: abort();
7780 }
7781 if (offset)
b0109805
PB
7782 tcg_gen_addi_i32(addr, addr, offset);
7783 store_reg(s, rn, addr);
7784 } else {
7d1b0095 7785 tcg_temp_free_i32(addr);
9ee6e8bb 7786 }
b0109805 7787 gen_rfe(s, tmp, tmp2);
c67b6b71 7788 return;
9ee6e8bb
PB
7789 } else if ((insn & 0x0e000000) == 0x0a000000) {
7790 /* branch link and change to thumb (blx <offset>) */
7791 int32_t offset;
7792
7793 val = (uint32_t)s->pc;
7d1b0095 7794 tmp = tcg_temp_new_i32();
d9ba4830
PB
7795 tcg_gen_movi_i32(tmp, val);
7796 store_reg(s, 14, tmp);
9ee6e8bb
PB
7797 /* Sign-extend the 24-bit offset */
7798 offset = (((int32_t)insn) << 8) >> 8;
7799 /* offset * 4 + bit24 * 2 + (thumb bit) */
7800 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7801 /* pipeline offset */
7802 val += 4;
be5e7a76 7803 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7804 gen_bx_im(s, val);
9ee6e8bb
PB
7805 return;
7806 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 7807 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 7808 /* iWMMXt register transfer. */
c0f4af17 7809 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 7810 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 7811 return;
c0f4af17
PM
7812 }
7813 }
9ee6e8bb
PB
7814 }
7815 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7816 /* Coprocessor double register transfer. */
be5e7a76 7817 ARCH(5TE);
9ee6e8bb
PB
7818 } else if ((insn & 0x0f000010) == 0x0e000010) {
7819 /* Additional coprocessor register transfer. */
7997d92f 7820 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7821 uint32_t mask;
7822 uint32_t val;
7823 /* cps (privileged) */
7824 if (IS_USER(s))
7825 return;
7826 mask = val = 0;
7827 if (insn & (1 << 19)) {
7828 if (insn & (1 << 8))
7829 mask |= CPSR_A;
7830 if (insn & (1 << 7))
7831 mask |= CPSR_I;
7832 if (insn & (1 << 6))
7833 mask |= CPSR_F;
7834 if (insn & (1 << 18))
7835 val |= mask;
7836 }
7997d92f 7837 if (insn & (1 << 17)) {
9ee6e8bb
PB
7838 mask |= CPSR_M;
7839 val |= (insn & 0x1f);
7840 }
7841 if (mask) {
2fbac54b 7842 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7843 }
7844 return;
7845 }
7846 goto illegal_op;
7847 }
7848 if (cond != 0xe) {
7849 /* if not always execute, we generate a conditional jump to
7850 next instruction */
7851 s->condlabel = gen_new_label();
39fb730a 7852 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7853 s->condjmp = 1;
7854 }
7855 if ((insn & 0x0f900000) == 0x03000000) {
7856 if ((insn & (1 << 21)) == 0) {
7857 ARCH(6T2);
7858 rd = (insn >> 12) & 0xf;
7859 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7860 if ((insn & (1 << 22)) == 0) {
7861 /* MOVW */
7d1b0095 7862 tmp = tcg_temp_new_i32();
5e3f878a 7863 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7864 } else {
7865 /* MOVT */
5e3f878a 7866 tmp = load_reg(s, rd);
86831435 7867 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7868 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7869 }
5e3f878a 7870 store_reg(s, rd, tmp);
9ee6e8bb
PB
7871 } else {
7872 if (((insn >> 12) & 0xf) != 0xf)
7873 goto illegal_op;
7874 if (((insn >> 16) & 0xf) == 0) {
7875 gen_nop_hint(s, insn & 0xff);
7876 } else {
7877 /* CPSR = immediate */
7878 val = insn & 0xff;
7879 shift = ((insn >> 8) & 0xf) * 2;
7880 if (shift)
7881 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7882 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
7883 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7884 i, val)) {
9ee6e8bb 7885 goto illegal_op;
7dcc1f89 7886 }
9ee6e8bb
PB
7887 }
7888 }
7889 } else if ((insn & 0x0f900000) == 0x01000000
7890 && (insn & 0x00000090) != 0x00000090) {
7891 /* miscellaneous instructions */
7892 op1 = (insn >> 21) & 3;
7893 sh = (insn >> 4) & 0xf;
7894 rm = insn & 0xf;
7895 switch (sh) {
7896 case 0x0: /* move program status register */
7897 if (op1 & 1) {
7898 /* PSR = reg */
2fbac54b 7899 tmp = load_reg(s, rm);
9ee6e8bb 7900 i = ((op1 & 2) != 0);
7dcc1f89 7901 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7902 goto illegal_op;
7903 } else {
7904 /* reg = PSR */
7905 rd = (insn >> 12) & 0xf;
7906 if (op1 & 2) {
7907 if (IS_USER(s))
7908 goto illegal_op;
d9ba4830 7909 tmp = load_cpu_field(spsr);
9ee6e8bb 7910 } else {
7d1b0095 7911 tmp = tcg_temp_new_i32();
9ef39277 7912 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7913 }
d9ba4830 7914 store_reg(s, rd, tmp);
9ee6e8bb
PB
7915 }
7916 break;
7917 case 0x1:
7918 if (op1 == 1) {
7919 /* branch/exchange thumb (bx). */
be5e7a76 7920 ARCH(4T);
d9ba4830
PB
7921 tmp = load_reg(s, rm);
7922 gen_bx(s, tmp);
9ee6e8bb
PB
7923 } else if (op1 == 3) {
7924 /* clz */
be5e7a76 7925 ARCH(5);
9ee6e8bb 7926 rd = (insn >> 12) & 0xf;
1497c961
PB
7927 tmp = load_reg(s, rm);
7928 gen_helper_clz(tmp, tmp);
7929 store_reg(s, rd, tmp);
9ee6e8bb
PB
7930 } else {
7931 goto illegal_op;
7932 }
7933 break;
7934 case 0x2:
7935 if (op1 == 1) {
7936 ARCH(5J); /* bxj */
7937 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7938 tmp = load_reg(s, rm);
7939 gen_bx(s, tmp);
9ee6e8bb
PB
7940 } else {
7941 goto illegal_op;
7942 }
7943 break;
7944 case 0x3:
7945 if (op1 != 1)
7946 goto illegal_op;
7947
be5e7a76 7948 ARCH(5);
9ee6e8bb 7949 /* branch link/exchange thumb (blx) */
d9ba4830 7950 tmp = load_reg(s, rm);
7d1b0095 7951 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7952 tcg_gen_movi_i32(tmp2, s->pc);
7953 store_reg(s, 14, tmp2);
7954 gen_bx(s, tmp);
9ee6e8bb 7955 break;
eb0ecd5a
WN
7956 case 0x4:
7957 {
7958 /* crc32/crc32c */
7959 uint32_t c = extract32(insn, 8, 4);
7960
7961 /* Check this CPU supports ARMv8 CRC instructions.
7962 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7963 * Bits 8, 10 and 11 should be zero.
7964 */
d614a513 7965 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
7966 (c & 0xd) != 0) {
7967 goto illegal_op;
7968 }
7969
7970 rn = extract32(insn, 16, 4);
7971 rd = extract32(insn, 12, 4);
7972
7973 tmp = load_reg(s, rn);
7974 tmp2 = load_reg(s, rm);
aa633469
PM
7975 if (op1 == 0) {
7976 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7977 } else if (op1 == 1) {
7978 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7979 }
eb0ecd5a
WN
7980 tmp3 = tcg_const_i32(1 << op1);
7981 if (c & 0x2) {
7982 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7983 } else {
7984 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7985 }
7986 tcg_temp_free_i32(tmp2);
7987 tcg_temp_free_i32(tmp3);
7988 store_reg(s, rd, tmp);
7989 break;
7990 }
9ee6e8bb 7991 case 0x5: /* saturating add/subtract */
be5e7a76 7992 ARCH(5TE);
9ee6e8bb
PB
7993 rd = (insn >> 12) & 0xf;
7994 rn = (insn >> 16) & 0xf;
b40d0353 7995 tmp = load_reg(s, rm);
5e3f878a 7996 tmp2 = load_reg(s, rn);
9ee6e8bb 7997 if (op1 & 2)
9ef39277 7998 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7999 if (op1 & 1)
9ef39277 8000 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8001 else
9ef39277 8002 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8003 tcg_temp_free_i32(tmp2);
5e3f878a 8004 store_reg(s, rd, tmp);
9ee6e8bb 8005 break;
49e14940 8006 case 7:
d4a2dc67
PM
8007 {
8008 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
8009 switch (op1) {
8010 case 1:
8011 /* bkpt */
8012 ARCH(5);
8013 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8014 syn_aa32_bkpt(imm16, false),
8015 default_exception_el(s));
37e6456e
PM
8016 break;
8017 case 2:
8018 /* Hypervisor call (v7) */
8019 ARCH(7);
8020 if (IS_USER(s)) {
8021 goto illegal_op;
8022 }
8023 gen_hvc(s, imm16);
8024 break;
8025 case 3:
8026 /* Secure monitor call (v6+) */
8027 ARCH(6K);
8028 if (IS_USER(s)) {
8029 goto illegal_op;
8030 }
8031 gen_smc(s);
8032 break;
8033 default:
49e14940
AL
8034 goto illegal_op;
8035 }
9ee6e8bb 8036 break;
d4a2dc67 8037 }
9ee6e8bb
PB
8038 case 0x8: /* signed multiply */
8039 case 0xa:
8040 case 0xc:
8041 case 0xe:
be5e7a76 8042 ARCH(5TE);
9ee6e8bb
PB
8043 rs = (insn >> 8) & 0xf;
8044 rn = (insn >> 12) & 0xf;
8045 rd = (insn >> 16) & 0xf;
8046 if (op1 == 1) {
8047 /* (32 * 16) >> 16 */
5e3f878a
PB
8048 tmp = load_reg(s, rm);
8049 tmp2 = load_reg(s, rs);
9ee6e8bb 8050 if (sh & 4)
5e3f878a 8051 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8052 else
5e3f878a 8053 gen_sxth(tmp2);
a7812ae4
PB
8054 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8055 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8056 tmp = tcg_temp_new_i32();
ecc7b3aa 8057 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8058 tcg_temp_free_i64(tmp64);
9ee6e8bb 8059 if ((sh & 2) == 0) {
5e3f878a 8060 tmp2 = load_reg(s, rn);
9ef39277 8061 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8062 tcg_temp_free_i32(tmp2);
9ee6e8bb 8063 }
5e3f878a 8064 store_reg(s, rd, tmp);
9ee6e8bb
PB
8065 } else {
8066 /* 16 * 16 */
5e3f878a
PB
8067 tmp = load_reg(s, rm);
8068 tmp2 = load_reg(s, rs);
8069 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8070 tcg_temp_free_i32(tmp2);
9ee6e8bb 8071 if (op1 == 2) {
a7812ae4
PB
8072 tmp64 = tcg_temp_new_i64();
8073 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8074 tcg_temp_free_i32(tmp);
a7812ae4
PB
8075 gen_addq(s, tmp64, rn, rd);
8076 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8077 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8078 } else {
8079 if (op1 == 0) {
5e3f878a 8080 tmp2 = load_reg(s, rn);
9ef39277 8081 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8082 tcg_temp_free_i32(tmp2);
9ee6e8bb 8083 }
5e3f878a 8084 store_reg(s, rd, tmp);
9ee6e8bb
PB
8085 }
8086 }
8087 break;
8088 default:
8089 goto illegal_op;
8090 }
8091 } else if (((insn & 0x0e000000) == 0 &&
8092 (insn & 0x00000090) != 0x90) ||
8093 ((insn & 0x0e000000) == (1 << 25))) {
8094 int set_cc, logic_cc, shiftop;
8095
8096 op1 = (insn >> 21) & 0xf;
8097 set_cc = (insn >> 20) & 1;
8098 logic_cc = table_logic_cc[op1] & set_cc;
8099
8100 /* data processing instruction */
8101 if (insn & (1 << 25)) {
8102 /* immediate operand */
8103 val = insn & 0xff;
8104 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8105 if (shift) {
9ee6e8bb 8106 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8107 }
7d1b0095 8108 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8109 tcg_gen_movi_i32(tmp2, val);
8110 if (logic_cc && shift) {
8111 gen_set_CF_bit31(tmp2);
8112 }
9ee6e8bb
PB
8113 } else {
8114 /* register */
8115 rm = (insn) & 0xf;
e9bb4aa9 8116 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8117 shiftop = (insn >> 5) & 3;
8118 if (!(insn & (1 << 4))) {
8119 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8120 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8121 } else {
8122 rs = (insn >> 8) & 0xf;
8984bd2e 8123 tmp = load_reg(s, rs);
e9bb4aa9 8124 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8125 }
8126 }
8127 if (op1 != 0x0f && op1 != 0x0d) {
8128 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8129 tmp = load_reg(s, rn);
8130 } else {
39d5492a 8131 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8132 }
8133 rd = (insn >> 12) & 0xf;
8134 switch(op1) {
8135 case 0x00:
e9bb4aa9
JR
8136 tcg_gen_and_i32(tmp, tmp, tmp2);
8137 if (logic_cc) {
8138 gen_logic_CC(tmp);
8139 }
7dcc1f89 8140 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8141 break;
8142 case 0x01:
e9bb4aa9
JR
8143 tcg_gen_xor_i32(tmp, tmp, tmp2);
8144 if (logic_cc) {
8145 gen_logic_CC(tmp);
8146 }
7dcc1f89 8147 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8148 break;
8149 case 0x02:
8150 if (set_cc && rd == 15) {
8151 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8152 if (IS_USER(s)) {
9ee6e8bb 8153 goto illegal_op;
e9bb4aa9 8154 }
72485ec4 8155 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8156 gen_exception_return(s, tmp);
9ee6e8bb 8157 } else {
e9bb4aa9 8158 if (set_cc) {
72485ec4 8159 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8160 } else {
8161 tcg_gen_sub_i32(tmp, tmp, tmp2);
8162 }
7dcc1f89 8163 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8164 }
8165 break;
8166 case 0x03:
e9bb4aa9 8167 if (set_cc) {
72485ec4 8168 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8169 } else {
8170 tcg_gen_sub_i32(tmp, tmp2, tmp);
8171 }
7dcc1f89 8172 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8173 break;
8174 case 0x04:
e9bb4aa9 8175 if (set_cc) {
72485ec4 8176 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8177 } else {
8178 tcg_gen_add_i32(tmp, tmp, tmp2);
8179 }
7dcc1f89 8180 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8181 break;
8182 case 0x05:
e9bb4aa9 8183 if (set_cc) {
49b4c31e 8184 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8185 } else {
8186 gen_add_carry(tmp, tmp, tmp2);
8187 }
7dcc1f89 8188 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8189 break;
8190 case 0x06:
e9bb4aa9 8191 if (set_cc) {
2de68a49 8192 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8193 } else {
8194 gen_sub_carry(tmp, tmp, tmp2);
8195 }
7dcc1f89 8196 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8197 break;
8198 case 0x07:
e9bb4aa9 8199 if (set_cc) {
2de68a49 8200 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8201 } else {
8202 gen_sub_carry(tmp, tmp2, tmp);
8203 }
7dcc1f89 8204 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8205 break;
8206 case 0x08:
8207 if (set_cc) {
e9bb4aa9
JR
8208 tcg_gen_and_i32(tmp, tmp, tmp2);
8209 gen_logic_CC(tmp);
9ee6e8bb 8210 }
7d1b0095 8211 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8212 break;
8213 case 0x09:
8214 if (set_cc) {
e9bb4aa9
JR
8215 tcg_gen_xor_i32(tmp, tmp, tmp2);
8216 gen_logic_CC(tmp);
9ee6e8bb 8217 }
7d1b0095 8218 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8219 break;
8220 case 0x0a:
8221 if (set_cc) {
72485ec4 8222 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8223 }
7d1b0095 8224 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8225 break;
8226 case 0x0b:
8227 if (set_cc) {
72485ec4 8228 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8229 }
7d1b0095 8230 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8231 break;
8232 case 0x0c:
e9bb4aa9
JR
8233 tcg_gen_or_i32(tmp, tmp, tmp2);
8234 if (logic_cc) {
8235 gen_logic_CC(tmp);
8236 }
7dcc1f89 8237 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8238 break;
8239 case 0x0d:
8240 if (logic_cc && rd == 15) {
8241 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8242 if (IS_USER(s)) {
9ee6e8bb 8243 goto illegal_op;
e9bb4aa9
JR
8244 }
8245 gen_exception_return(s, tmp2);
9ee6e8bb 8246 } else {
e9bb4aa9
JR
8247 if (logic_cc) {
8248 gen_logic_CC(tmp2);
8249 }
7dcc1f89 8250 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8251 }
8252 break;
8253 case 0x0e:
f669df27 8254 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8255 if (logic_cc) {
8256 gen_logic_CC(tmp);
8257 }
7dcc1f89 8258 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8259 break;
8260 default:
8261 case 0x0f:
e9bb4aa9
JR
8262 tcg_gen_not_i32(tmp2, tmp2);
8263 if (logic_cc) {
8264 gen_logic_CC(tmp2);
8265 }
7dcc1f89 8266 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8267 break;
8268 }
e9bb4aa9 8269 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8270 tcg_temp_free_i32(tmp2);
e9bb4aa9 8271 }
9ee6e8bb
PB
8272 } else {
8273 /* other instructions */
8274 op1 = (insn >> 24) & 0xf;
8275 switch(op1) {
8276 case 0x0:
8277 case 0x1:
8278 /* multiplies, extra load/stores */
8279 sh = (insn >> 5) & 3;
8280 if (sh == 0) {
8281 if (op1 == 0x0) {
8282 rd = (insn >> 16) & 0xf;
8283 rn = (insn >> 12) & 0xf;
8284 rs = (insn >> 8) & 0xf;
8285 rm = (insn) & 0xf;
8286 op1 = (insn >> 20) & 0xf;
8287 switch (op1) {
8288 case 0: case 1: case 2: case 3: case 6:
8289 /* 32 bit mul */
5e3f878a
PB
8290 tmp = load_reg(s, rs);
8291 tmp2 = load_reg(s, rm);
8292 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8293 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8294 if (insn & (1 << 22)) {
8295 /* Subtract (mls) */
8296 ARCH(6T2);
5e3f878a
PB
8297 tmp2 = load_reg(s, rn);
8298 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8299 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8300 } else if (insn & (1 << 21)) {
8301 /* Add */
5e3f878a
PB
8302 tmp2 = load_reg(s, rn);
8303 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8304 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8305 }
8306 if (insn & (1 << 20))
5e3f878a
PB
8307 gen_logic_CC(tmp);
8308 store_reg(s, rd, tmp);
9ee6e8bb 8309 break;
8aac08b1
AJ
8310 case 4:
8311 /* 64 bit mul double accumulate (UMAAL) */
8312 ARCH(6);
8313 tmp = load_reg(s, rs);
8314 tmp2 = load_reg(s, rm);
8315 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8316 gen_addq_lo(s, tmp64, rn);
8317 gen_addq_lo(s, tmp64, rd);
8318 gen_storeq_reg(s, rn, rd, tmp64);
8319 tcg_temp_free_i64(tmp64);
8320 break;
8321 case 8: case 9: case 10: case 11:
8322 case 12: case 13: case 14: case 15:
8323 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8324 tmp = load_reg(s, rs);
8325 tmp2 = load_reg(s, rm);
8aac08b1 8326 if (insn & (1 << 22)) {
c9f10124 8327 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8328 } else {
c9f10124 8329 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8330 }
8331 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8332 TCGv_i32 al = load_reg(s, rn);
8333 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8334 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8335 tcg_temp_free_i32(al);
8336 tcg_temp_free_i32(ah);
9ee6e8bb 8337 }
8aac08b1 8338 if (insn & (1 << 20)) {
c9f10124 8339 gen_logicq_cc(tmp, tmp2);
8aac08b1 8340 }
c9f10124
RH
8341 store_reg(s, rn, tmp);
8342 store_reg(s, rd, tmp2);
9ee6e8bb 8343 break;
8aac08b1
AJ
8344 default:
8345 goto illegal_op;
9ee6e8bb
PB
8346 }
8347 } else {
8348 rn = (insn >> 16) & 0xf;
8349 rd = (insn >> 12) & 0xf;
8350 if (insn & (1 << 23)) {
8351 /* load/store exclusive */
2359bf80 8352 int op2 = (insn >> 8) & 3;
86753403 8353 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8354
8355 switch (op2) {
8356 case 0: /* lda/stl */
8357 if (op1 == 1) {
8358 goto illegal_op;
8359 }
8360 ARCH(8);
8361 break;
8362 case 1: /* reserved */
8363 goto illegal_op;
8364 case 2: /* ldaex/stlex */
8365 ARCH(8);
8366 break;
8367 case 3: /* ldrex/strex */
8368 if (op1) {
8369 ARCH(6K);
8370 } else {
8371 ARCH(6);
8372 }
8373 break;
8374 }
8375
3174f8e9 8376 addr = tcg_temp_local_new_i32();
98a46317 8377 load_reg_var(s, addr, rn);
2359bf80
MR
8378
8379 /* Since the emulation does not have barriers,
8380 the acquire/release semantics need no special
8381 handling */
8382 if (op2 == 0) {
8383 if (insn & (1 << 20)) {
8384 tmp = tcg_temp_new_i32();
8385 switch (op1) {
8386 case 0: /* lda */
6ce2faf4 8387 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8388 break;
8389 case 2: /* ldab */
6ce2faf4 8390 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8391 break;
8392 case 3: /* ldah */
6ce2faf4 8393 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8394 break;
8395 default:
8396 abort();
8397 }
8398 store_reg(s, rd, tmp);
8399 } else {
8400 rm = insn & 0xf;
8401 tmp = load_reg(s, rm);
8402 switch (op1) {
8403 case 0: /* stl */
6ce2faf4 8404 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8405 break;
8406 case 2: /* stlb */
6ce2faf4 8407 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8408 break;
8409 case 3: /* stlh */
6ce2faf4 8410 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8411 break;
8412 default:
8413 abort();
8414 }
8415 tcg_temp_free_i32(tmp);
8416 }
8417 } else if (insn & (1 << 20)) {
86753403
PB
8418 switch (op1) {
8419 case 0: /* ldrex */
426f5abc 8420 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8421 break;
8422 case 1: /* ldrexd */
426f5abc 8423 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8424 break;
8425 case 2: /* ldrexb */
426f5abc 8426 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8427 break;
8428 case 3: /* ldrexh */
426f5abc 8429 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8430 break;
8431 default:
8432 abort();
8433 }
9ee6e8bb
PB
8434 } else {
8435 rm = insn & 0xf;
86753403
PB
8436 switch (op1) {
8437 case 0: /* strex */
426f5abc 8438 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8439 break;
8440 case 1: /* strexd */
502e64fe 8441 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8442 break;
8443 case 2: /* strexb */
426f5abc 8444 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8445 break;
8446 case 3: /* strexh */
426f5abc 8447 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8448 break;
8449 default:
8450 abort();
8451 }
9ee6e8bb 8452 }
39d5492a 8453 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8454 } else {
8455 /* SWP instruction */
8456 rm = (insn) & 0xf;
8457
8984bd2e
PB
8458 /* ??? This is not really atomic. However we know
8459 we never have multiple CPUs running in parallel,
8460 so it is good enough. */
8461 addr = load_reg(s, rn);
8462 tmp = load_reg(s, rm);
5a839c0d 8463 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8464 if (insn & (1 << 22)) {
6ce2faf4
EI
8465 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8466 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8467 } else {
6ce2faf4
EI
8468 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8469 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8470 }
5a839c0d 8471 tcg_temp_free_i32(tmp);
7d1b0095 8472 tcg_temp_free_i32(addr);
8984bd2e 8473 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8474 }
8475 }
8476 } else {
8477 int address_offset;
3960c336
PM
8478 bool load = insn & (1 << 20);
8479 bool doubleword = false;
9ee6e8bb
PB
8480 /* Misc load/store */
8481 rn = (insn >> 16) & 0xf;
8482 rd = (insn >> 12) & 0xf;
3960c336
PM
8483
8484 if (!load && (sh & 2)) {
8485 /* doubleword */
8486 ARCH(5TE);
8487 if (rd & 1) {
8488 /* UNPREDICTABLE; we choose to UNDEF */
8489 goto illegal_op;
8490 }
8491 load = (sh & 1) == 0;
8492 doubleword = true;
8493 }
8494
b0109805 8495 addr = load_reg(s, rn);
9ee6e8bb 8496 if (insn & (1 << 24))
b0109805 8497 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8498 address_offset = 0;
3960c336
PM
8499
8500 if (doubleword) {
8501 if (!load) {
9ee6e8bb 8502 /* store */
b0109805 8503 tmp = load_reg(s, rd);
6ce2faf4 8504 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8505 tcg_temp_free_i32(tmp);
b0109805
PB
8506 tcg_gen_addi_i32(addr, addr, 4);
8507 tmp = load_reg(s, rd + 1);
6ce2faf4 8508 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8509 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8510 } else {
8511 /* load */
5a839c0d 8512 tmp = tcg_temp_new_i32();
6ce2faf4 8513 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8514 store_reg(s, rd, tmp);
8515 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8516 tmp = tcg_temp_new_i32();
6ce2faf4 8517 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 8518 rd++;
9ee6e8bb
PB
8519 }
8520 address_offset = -4;
3960c336
PM
8521 } else if (load) {
8522 /* load */
8523 tmp = tcg_temp_new_i32();
8524 switch (sh) {
8525 case 1:
8526 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8527 break;
8528 case 2:
8529 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
8530 break;
8531 default:
8532 case 3:
8533 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
8534 break;
8535 }
9ee6e8bb
PB
8536 } else {
8537 /* store */
b0109805 8538 tmp = load_reg(s, rd);
6ce2faf4 8539 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8540 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8541 }
8542 /* Perform base writeback before the loaded value to
8543 ensure correct behavior with overlapping index registers.
b6af0975 8544 ldrd with base writeback is undefined if the
9ee6e8bb
PB
8545 destination and index registers overlap. */
8546 if (!(insn & (1 << 24))) {
b0109805
PB
8547 gen_add_datah_offset(s, insn, address_offset, addr);
8548 store_reg(s, rn, addr);
9ee6e8bb
PB
8549 } else if (insn & (1 << 21)) {
8550 if (address_offset)
b0109805
PB
8551 tcg_gen_addi_i32(addr, addr, address_offset);
8552 store_reg(s, rn, addr);
8553 } else {
7d1b0095 8554 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8555 }
8556 if (load) {
8557 /* Complete the load. */
b0109805 8558 store_reg(s, rd, tmp);
9ee6e8bb
PB
8559 }
8560 }
8561 break;
8562 case 0x4:
8563 case 0x5:
8564 goto do_ldst;
8565 case 0x6:
8566 case 0x7:
8567 if (insn & (1 << 4)) {
8568 ARCH(6);
8569 /* Armv6 Media instructions. */
8570 rm = insn & 0xf;
8571 rn = (insn >> 16) & 0xf;
2c0262af 8572 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8573 rs = (insn >> 8) & 0xf;
8574 switch ((insn >> 23) & 3) {
8575 case 0: /* Parallel add/subtract. */
8576 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8577 tmp = load_reg(s, rn);
8578 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8579 sh = (insn >> 5) & 7;
8580 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8581 goto illegal_op;
6ddbc6e4 8582 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8583 tcg_temp_free_i32(tmp2);
6ddbc6e4 8584 store_reg(s, rd, tmp);
9ee6e8bb
PB
8585 break;
8586 case 1:
8587 if ((insn & 0x00700020) == 0) {
6c95676b 8588 /* Halfword pack. */
3670669c
PB
8589 tmp = load_reg(s, rn);
8590 tmp2 = load_reg(s, rm);
9ee6e8bb 8591 shift = (insn >> 7) & 0x1f;
3670669c
PB
8592 if (insn & (1 << 6)) {
8593 /* pkhtb */
22478e79
AZ
8594 if (shift == 0)
8595 shift = 31;
8596 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8597 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8598 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8599 } else {
8600 /* pkhbt */
22478e79
AZ
8601 if (shift)
8602 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8603 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8604 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8605 }
8606 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8607 tcg_temp_free_i32(tmp2);
3670669c 8608 store_reg(s, rd, tmp);
9ee6e8bb
PB
8609 } else if ((insn & 0x00200020) == 0x00200000) {
8610 /* [us]sat */
6ddbc6e4 8611 tmp = load_reg(s, rm);
9ee6e8bb
PB
8612 shift = (insn >> 7) & 0x1f;
8613 if (insn & (1 << 6)) {
8614 if (shift == 0)
8615 shift = 31;
6ddbc6e4 8616 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8617 } else {
6ddbc6e4 8618 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8619 }
8620 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8621 tmp2 = tcg_const_i32(sh);
8622 if (insn & (1 << 22))
9ef39277 8623 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8624 else
9ef39277 8625 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8626 tcg_temp_free_i32(tmp2);
6ddbc6e4 8627 store_reg(s, rd, tmp);
9ee6e8bb
PB
8628 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8629 /* [us]sat16 */
6ddbc6e4 8630 tmp = load_reg(s, rm);
9ee6e8bb 8631 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8632 tmp2 = tcg_const_i32(sh);
8633 if (insn & (1 << 22))
9ef39277 8634 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8635 else
9ef39277 8636 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8637 tcg_temp_free_i32(tmp2);
6ddbc6e4 8638 store_reg(s, rd, tmp);
9ee6e8bb
PB
8639 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8640 /* Select bytes. */
6ddbc6e4
PB
8641 tmp = load_reg(s, rn);
8642 tmp2 = load_reg(s, rm);
7d1b0095 8643 tmp3 = tcg_temp_new_i32();
0ecb72a5 8644 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8645 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8646 tcg_temp_free_i32(tmp3);
8647 tcg_temp_free_i32(tmp2);
6ddbc6e4 8648 store_reg(s, rd, tmp);
9ee6e8bb 8649 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8650 tmp = load_reg(s, rm);
9ee6e8bb 8651 shift = (insn >> 10) & 3;
1301f322 8652 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8653 rotate, a shift is sufficient. */
8654 if (shift != 0)
f669df27 8655 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8656 op1 = (insn >> 20) & 7;
8657 switch (op1) {
5e3f878a
PB
8658 case 0: gen_sxtb16(tmp); break;
8659 case 2: gen_sxtb(tmp); break;
8660 case 3: gen_sxth(tmp); break;
8661 case 4: gen_uxtb16(tmp); break;
8662 case 6: gen_uxtb(tmp); break;
8663 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8664 default: goto illegal_op;
8665 }
8666 if (rn != 15) {
5e3f878a 8667 tmp2 = load_reg(s, rn);
9ee6e8bb 8668 if ((op1 & 3) == 0) {
5e3f878a 8669 gen_add16(tmp, tmp2);
9ee6e8bb 8670 } else {
5e3f878a 8671 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8672 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8673 }
8674 }
6c95676b 8675 store_reg(s, rd, tmp);
9ee6e8bb
PB
8676 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8677 /* rev */
b0109805 8678 tmp = load_reg(s, rm);
9ee6e8bb
PB
8679 if (insn & (1 << 22)) {
8680 if (insn & (1 << 7)) {
b0109805 8681 gen_revsh(tmp);
9ee6e8bb
PB
8682 } else {
8683 ARCH(6T2);
b0109805 8684 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8685 }
8686 } else {
8687 if (insn & (1 << 7))
b0109805 8688 gen_rev16(tmp);
9ee6e8bb 8689 else
66896cb8 8690 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8691 }
b0109805 8692 store_reg(s, rd, tmp);
9ee6e8bb
PB
8693 } else {
8694 goto illegal_op;
8695 }
8696 break;
8697 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8698 switch ((insn >> 20) & 0x7) {
8699 case 5:
8700 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8701 /* op2 not 00x or 11x : UNDEF */
8702 goto illegal_op;
8703 }
838fa72d
AJ
8704 /* Signed multiply most significant [accumulate].
8705 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8706 tmp = load_reg(s, rm);
8707 tmp2 = load_reg(s, rs);
a7812ae4 8708 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8709
955a7dd5 8710 if (rd != 15) {
838fa72d 8711 tmp = load_reg(s, rd);
9ee6e8bb 8712 if (insn & (1 << 6)) {
838fa72d 8713 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8714 } else {
838fa72d 8715 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8716 }
8717 }
838fa72d
AJ
8718 if (insn & (1 << 5)) {
8719 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8720 }
8721 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8722 tmp = tcg_temp_new_i32();
ecc7b3aa 8723 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 8724 tcg_temp_free_i64(tmp64);
955a7dd5 8725 store_reg(s, rn, tmp);
41e9564d
PM
8726 break;
8727 case 0:
8728 case 4:
8729 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8730 if (insn & (1 << 7)) {
8731 goto illegal_op;
8732 }
8733 tmp = load_reg(s, rm);
8734 tmp2 = load_reg(s, rs);
9ee6e8bb 8735 if (insn & (1 << 5))
5e3f878a
PB
8736 gen_swap_half(tmp2);
8737 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8738 if (insn & (1 << 22)) {
5e3f878a 8739 /* smlald, smlsld */
33bbd75a
PC
8740 TCGv_i64 tmp64_2;
8741
a7812ae4 8742 tmp64 = tcg_temp_new_i64();
33bbd75a 8743 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8744 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8745 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8746 tcg_temp_free_i32(tmp);
33bbd75a
PC
8747 tcg_temp_free_i32(tmp2);
8748 if (insn & (1 << 6)) {
8749 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8750 } else {
8751 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8752 }
8753 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8754 gen_addq(s, tmp64, rd, rn);
8755 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8756 tcg_temp_free_i64(tmp64);
9ee6e8bb 8757 } else {
5e3f878a 8758 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8759 if (insn & (1 << 6)) {
8760 /* This subtraction cannot overflow. */
8761 tcg_gen_sub_i32(tmp, tmp, tmp2);
8762 } else {
8763 /* This addition cannot overflow 32 bits;
8764 * however it may overflow considered as a
8765 * signed operation, in which case we must set
8766 * the Q flag.
8767 */
8768 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8769 }
8770 tcg_temp_free_i32(tmp2);
22478e79 8771 if (rd != 15)
9ee6e8bb 8772 {
22478e79 8773 tmp2 = load_reg(s, rd);
9ef39277 8774 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8775 tcg_temp_free_i32(tmp2);
9ee6e8bb 8776 }
22478e79 8777 store_reg(s, rn, tmp);
9ee6e8bb 8778 }
41e9564d 8779 break;
b8b8ea05
PM
8780 case 1:
8781 case 3:
8782 /* SDIV, UDIV */
d614a513 8783 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
8784 goto illegal_op;
8785 }
8786 if (((insn >> 5) & 7) || (rd != 15)) {
8787 goto illegal_op;
8788 }
8789 tmp = load_reg(s, rm);
8790 tmp2 = load_reg(s, rs);
8791 if (insn & (1 << 21)) {
8792 gen_helper_udiv(tmp, tmp, tmp2);
8793 } else {
8794 gen_helper_sdiv(tmp, tmp, tmp2);
8795 }
8796 tcg_temp_free_i32(tmp2);
8797 store_reg(s, rn, tmp);
8798 break;
41e9564d
PM
8799 default:
8800 goto illegal_op;
9ee6e8bb
PB
8801 }
8802 break;
8803 case 3:
8804 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8805 switch (op1) {
8806 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8807 ARCH(6);
8808 tmp = load_reg(s, rm);
8809 tmp2 = load_reg(s, rs);
8810 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8811 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8812 if (rd != 15) {
8813 tmp2 = load_reg(s, rd);
6ddbc6e4 8814 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8815 tcg_temp_free_i32(tmp2);
9ee6e8bb 8816 }
ded9d295 8817 store_reg(s, rn, tmp);
9ee6e8bb
PB
8818 break;
8819 case 0x20: case 0x24: case 0x28: case 0x2c:
8820 /* Bitfield insert/clear. */
8821 ARCH(6T2);
8822 shift = (insn >> 7) & 0x1f;
8823 i = (insn >> 16) & 0x1f;
45140a57
KB
8824 if (i < shift) {
8825 /* UNPREDICTABLE; we choose to UNDEF */
8826 goto illegal_op;
8827 }
9ee6e8bb
PB
8828 i = i + 1 - shift;
8829 if (rm == 15) {
7d1b0095 8830 tmp = tcg_temp_new_i32();
5e3f878a 8831 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8832 } else {
5e3f878a 8833 tmp = load_reg(s, rm);
9ee6e8bb
PB
8834 }
8835 if (i != 32) {
5e3f878a 8836 tmp2 = load_reg(s, rd);
d593c48e 8837 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8838 tcg_temp_free_i32(tmp2);
9ee6e8bb 8839 }
5e3f878a 8840 store_reg(s, rd, tmp);
9ee6e8bb
PB
8841 break;
8842 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8843 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8844 ARCH(6T2);
5e3f878a 8845 tmp = load_reg(s, rm);
9ee6e8bb
PB
8846 shift = (insn >> 7) & 0x1f;
8847 i = ((insn >> 16) & 0x1f) + 1;
8848 if (shift + i > 32)
8849 goto illegal_op;
8850 if (i < 32) {
8851 if (op1 & 0x20) {
5e3f878a 8852 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8853 } else {
5e3f878a 8854 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8855 }
8856 }
5e3f878a 8857 store_reg(s, rd, tmp);
9ee6e8bb
PB
8858 break;
8859 default:
8860 goto illegal_op;
8861 }
8862 break;
8863 }
8864 break;
8865 }
8866 do_ldst:
8867 /* Check for undefined extension instructions
8868 * per the ARM Bible IE:
8869 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8870 */
8871 sh = (0xf << 20) | (0xf << 4);
8872 if (op1 == 0x7 && ((insn & sh) == sh))
8873 {
8874 goto illegal_op;
8875 }
8876 /* load/store byte/word */
8877 rn = (insn >> 16) & 0xf;
8878 rd = (insn >> 12) & 0xf;
b0109805 8879 tmp2 = load_reg(s, rn);
a99caa48
PM
8880 if ((insn & 0x01200000) == 0x00200000) {
8881 /* ldrt/strt */
579d21cc 8882 i = get_a32_user_mem_index(s);
a99caa48
PM
8883 } else {
8884 i = get_mem_index(s);
8885 }
9ee6e8bb 8886 if (insn & (1 << 24))
b0109805 8887 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8888 if (insn & (1 << 20)) {
8889 /* load */
5a839c0d 8890 tmp = tcg_temp_new_i32();
9ee6e8bb 8891 if (insn & (1 << 22)) {
08307563 8892 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8893 } else {
08307563 8894 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8895 }
9ee6e8bb
PB
8896 } else {
8897 /* store */
b0109805 8898 tmp = load_reg(s, rd);
5a839c0d 8899 if (insn & (1 << 22)) {
08307563 8900 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8901 } else {
08307563 8902 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8903 }
8904 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8905 }
8906 if (!(insn & (1 << 24))) {
b0109805
PB
8907 gen_add_data_offset(s, insn, tmp2);
8908 store_reg(s, rn, tmp2);
8909 } else if (insn & (1 << 21)) {
8910 store_reg(s, rn, tmp2);
8911 } else {
7d1b0095 8912 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8913 }
8914 if (insn & (1 << 20)) {
8915 /* Complete the load. */
7dcc1f89 8916 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
8917 }
8918 break;
8919 case 0x08:
8920 case 0x09:
8921 {
da3e53dd
PM
8922 int j, n, loaded_base;
8923 bool exc_return = false;
8924 bool is_load = extract32(insn, 20, 1);
8925 bool user = false;
39d5492a 8926 TCGv_i32 loaded_var;
9ee6e8bb
PB
8927 /* load/store multiple words */
8928 /* XXX: store correct base if write back */
9ee6e8bb 8929 if (insn & (1 << 22)) {
da3e53dd 8930 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
8931 if (IS_USER(s))
8932 goto illegal_op; /* only usable in supervisor mode */
8933
da3e53dd
PM
8934 if (is_load && extract32(insn, 15, 1)) {
8935 exc_return = true;
8936 } else {
8937 user = true;
8938 }
9ee6e8bb
PB
8939 }
8940 rn = (insn >> 16) & 0xf;
b0109805 8941 addr = load_reg(s, rn);
9ee6e8bb
PB
8942
8943 /* compute total size */
8944 loaded_base = 0;
39d5492a 8945 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8946 n = 0;
8947 for(i=0;i<16;i++) {
8948 if (insn & (1 << i))
8949 n++;
8950 }
8951 /* XXX: test invalid n == 0 case ? */
8952 if (insn & (1 << 23)) {
8953 if (insn & (1 << 24)) {
8954 /* pre increment */
b0109805 8955 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8956 } else {
8957 /* post increment */
8958 }
8959 } else {
8960 if (insn & (1 << 24)) {
8961 /* pre decrement */
b0109805 8962 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8963 } else {
8964 /* post decrement */
8965 if (n != 1)
b0109805 8966 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8967 }
8968 }
8969 j = 0;
8970 for(i=0;i<16;i++) {
8971 if (insn & (1 << i)) {
da3e53dd 8972 if (is_load) {
9ee6e8bb 8973 /* load */
5a839c0d 8974 tmp = tcg_temp_new_i32();
6ce2faf4 8975 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8976 if (user) {
b75263d6 8977 tmp2 = tcg_const_i32(i);
1ce94f81 8978 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8979 tcg_temp_free_i32(tmp2);
7d1b0095 8980 tcg_temp_free_i32(tmp);
9ee6e8bb 8981 } else if (i == rn) {
b0109805 8982 loaded_var = tmp;
9ee6e8bb
PB
8983 loaded_base = 1;
8984 } else {
7dcc1f89 8985 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
8986 }
8987 } else {
8988 /* store */
8989 if (i == 15) {
8990 /* special case: r15 = PC + 8 */
8991 val = (long)s->pc + 4;
7d1b0095 8992 tmp = tcg_temp_new_i32();
b0109805 8993 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8994 } else if (user) {
7d1b0095 8995 tmp = tcg_temp_new_i32();
b75263d6 8996 tmp2 = tcg_const_i32(i);
9ef39277 8997 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8998 tcg_temp_free_i32(tmp2);
9ee6e8bb 8999 } else {
b0109805 9000 tmp = load_reg(s, i);
9ee6e8bb 9001 }
6ce2faf4 9002 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 9003 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9004 }
9005 j++;
9006 /* no need to add after the last transfer */
9007 if (j != n)
b0109805 9008 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9009 }
9010 }
9011 if (insn & (1 << 21)) {
9012 /* write back */
9013 if (insn & (1 << 23)) {
9014 if (insn & (1 << 24)) {
9015 /* pre increment */
9016 } else {
9017 /* post increment */
b0109805 9018 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9019 }
9020 } else {
9021 if (insn & (1 << 24)) {
9022 /* pre decrement */
9023 if (n != 1)
b0109805 9024 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9025 } else {
9026 /* post decrement */
b0109805 9027 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9028 }
9029 }
b0109805
PB
9030 store_reg(s, rn, addr);
9031 } else {
7d1b0095 9032 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9033 }
9034 if (loaded_base) {
b0109805 9035 store_reg(s, rn, loaded_var);
9ee6e8bb 9036 }
da3e53dd 9037 if (exc_return) {
9ee6e8bb 9038 /* Restore CPSR from SPSR. */
d9ba4830 9039 tmp = load_cpu_field(spsr);
4051e12c 9040 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 9041 tcg_temp_free_i32(tmp);
577bf808 9042 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9043 }
9044 }
9045 break;
9046 case 0xa:
9047 case 0xb:
9048 {
9049 int32_t offset;
9050
9051 /* branch (and link) */
9052 val = (int32_t)s->pc;
9053 if (insn & (1 << 24)) {
7d1b0095 9054 tmp = tcg_temp_new_i32();
5e3f878a
PB
9055 tcg_gen_movi_i32(tmp, val);
9056 store_reg(s, 14, tmp);
9ee6e8bb 9057 }
534df156
PM
9058 offset = sextract32(insn << 2, 0, 26);
9059 val += offset + 4;
9ee6e8bb
PB
9060 gen_jmp(s, val);
9061 }
9062 break;
9063 case 0xc:
9064 case 0xd:
9065 case 0xe:
6a57f3eb
WN
9066 if (((insn >> 8) & 0xe) == 10) {
9067 /* VFP. */
7dcc1f89 9068 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9069 goto illegal_op;
9070 }
7dcc1f89 9071 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9072 /* Coprocessor. */
9ee6e8bb 9073 goto illegal_op;
6a57f3eb 9074 }
9ee6e8bb
PB
9075 break;
9076 case 0xf:
9077 /* swi */
eaed129d 9078 gen_set_pc_im(s, s->pc);
d4a2dc67 9079 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9080 s->is_jmp = DISAS_SWI;
9081 break;
9082 default:
9083 illegal_op:
73710361
GB
9084 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9085 default_exception_el(s));
9ee6e8bb
PB
9086 break;
9087 }
9088 }
9089}
9090
/* Predicate: is Thumb-2 data-processing opcode OP one of the logical
   operations (opcodes 0..7), as opposed to the arithmetic group? */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
9097
9098/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9099 then set condition code flags based on the result of the operation.
9100 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9101 to the high bit of T1.
9102 Returns zero if the opcode is valid. */
9103
9104static int
39d5492a
PM
9105gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9106 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9107{
9108 int logic_cc;
9109
9110 logic_cc = 0;
9111 switch (op) {
9112 case 0: /* and */
396e467c 9113 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9114 logic_cc = conds;
9115 break;
9116 case 1: /* bic */
f669df27 9117 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9118 logic_cc = conds;
9119 break;
9120 case 2: /* orr */
396e467c 9121 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9122 logic_cc = conds;
9123 break;
9124 case 3: /* orn */
29501f1b 9125 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9126 logic_cc = conds;
9127 break;
9128 case 4: /* eor */
396e467c 9129 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9130 logic_cc = conds;
9131 break;
9132 case 8: /* add */
9133 if (conds)
72485ec4 9134 gen_add_CC(t0, t0, t1);
9ee6e8bb 9135 else
396e467c 9136 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9137 break;
9138 case 10: /* adc */
9139 if (conds)
49b4c31e 9140 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9141 else
396e467c 9142 gen_adc(t0, t1);
9ee6e8bb
PB
9143 break;
9144 case 11: /* sbc */
2de68a49
RH
9145 if (conds) {
9146 gen_sbc_CC(t0, t0, t1);
9147 } else {
396e467c 9148 gen_sub_carry(t0, t0, t1);
2de68a49 9149 }
9ee6e8bb
PB
9150 break;
9151 case 13: /* sub */
9152 if (conds)
72485ec4 9153 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9154 else
396e467c 9155 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9156 break;
9157 case 14: /* rsb */
9158 if (conds)
72485ec4 9159 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9160 else
396e467c 9161 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9162 break;
9163 default: /* 5, 6, 7, 9, 12, 15. */
9164 return 1;
9165 }
9166 if (logic_cc) {
396e467c 9167 gen_logic_CC(t0);
9ee6e8bb 9168 if (shifter_out)
396e467c 9169 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9170 }
9171 return 0;
9172}
9173
9174/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9175 is not legal. */
0ecb72a5 9176static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9177{
b0109805 9178 uint32_t insn, imm, shift, offset;
9ee6e8bb 9179 uint32_t rd, rn, rm, rs;
39d5492a
PM
9180 TCGv_i32 tmp;
9181 TCGv_i32 tmp2;
9182 TCGv_i32 tmp3;
9183 TCGv_i32 addr;
a7812ae4 9184 TCGv_i64 tmp64;
9ee6e8bb
PB
9185 int op;
9186 int shiftop;
9187 int conds;
9188 int logic_cc;
9189
d614a513
PM
9190 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9191 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9192 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9193 16-bit instructions to get correct prefetch abort behavior. */
9194 insn = insn_hw1;
9195 if ((insn & (1 << 12)) == 0) {
be5e7a76 9196 ARCH(5);
9ee6e8bb
PB
9197 /* Second half of blx. */
9198 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9199 tmp = load_reg(s, 14);
9200 tcg_gen_addi_i32(tmp, tmp, offset);
9201 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9202
7d1b0095 9203 tmp2 = tcg_temp_new_i32();
b0109805 9204 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9205 store_reg(s, 14, tmp2);
9206 gen_bx(s, tmp);
9ee6e8bb
PB
9207 return 0;
9208 }
9209 if (insn & (1 << 11)) {
9210 /* Second half of bl. */
9211 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9212 tmp = load_reg(s, 14);
6a0d8a1d 9213 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9214
7d1b0095 9215 tmp2 = tcg_temp_new_i32();
b0109805 9216 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9217 store_reg(s, 14, tmp2);
9218 gen_bx(s, tmp);
9ee6e8bb
PB
9219 return 0;
9220 }
9221 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9222 /* Instruction spans a page boundary. Implement it as two
9223 16-bit instructions in case the second half causes an
9224 prefetch abort. */
9225 offset = ((int32_t)insn << 21) >> 9;
396e467c 9226 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9227 return 0;
9228 }
9229 /* Fall through to 32-bit decode. */
9230 }
9231
d31dd73e 9232 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9233 s->pc += 2;
9234 insn |= (uint32_t)insn_hw1 << 16;
9235
9236 if ((insn & 0xf800e800) != 0xf000e800) {
9237 ARCH(6T2);
9238 }
9239
9240 rn = (insn >> 16) & 0xf;
9241 rs = (insn >> 12) & 0xf;
9242 rd = (insn >> 8) & 0xf;
9243 rm = insn & 0xf;
9244 switch ((insn >> 25) & 0xf) {
9245 case 0: case 1: case 2: case 3:
9246 /* 16-bit instructions. Should never happen. */
9247 abort();
9248 case 4:
9249 if (insn & (1 << 22)) {
9250 /* Other load/store, table branch. */
9251 if (insn & 0x01200000) {
9252 /* Load/store doubleword. */
9253 if (rn == 15) {
7d1b0095 9254 addr = tcg_temp_new_i32();
b0109805 9255 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9256 } else {
b0109805 9257 addr = load_reg(s, rn);
9ee6e8bb
PB
9258 }
9259 offset = (insn & 0xff) * 4;
9260 if ((insn & (1 << 23)) == 0)
9261 offset = -offset;
9262 if (insn & (1 << 24)) {
b0109805 9263 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9264 offset = 0;
9265 }
9266 if (insn & (1 << 20)) {
9267 /* ldrd */
e2592fad 9268 tmp = tcg_temp_new_i32();
6ce2faf4 9269 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9270 store_reg(s, rs, tmp);
9271 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9272 tmp = tcg_temp_new_i32();
6ce2faf4 9273 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9274 store_reg(s, rd, tmp);
9ee6e8bb
PB
9275 } else {
9276 /* strd */
b0109805 9277 tmp = load_reg(s, rs);
6ce2faf4 9278 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9279 tcg_temp_free_i32(tmp);
b0109805
PB
9280 tcg_gen_addi_i32(addr, addr, 4);
9281 tmp = load_reg(s, rd);
6ce2faf4 9282 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9283 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9284 }
9285 if (insn & (1 << 21)) {
9286 /* Base writeback. */
9287 if (rn == 15)
9288 goto illegal_op;
b0109805
PB
9289 tcg_gen_addi_i32(addr, addr, offset - 4);
9290 store_reg(s, rn, addr);
9291 } else {
7d1b0095 9292 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9293 }
9294 } else if ((insn & (1 << 23)) == 0) {
9295 /* Load/store exclusive word. */
39d5492a 9296 addr = tcg_temp_local_new_i32();
98a46317 9297 load_reg_var(s, addr, rn);
426f5abc 9298 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9299 if (insn & (1 << 20)) {
426f5abc 9300 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9301 } else {
426f5abc 9302 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9303 }
39d5492a 9304 tcg_temp_free_i32(addr);
2359bf80 9305 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9306 /* Table Branch. */
9307 if (rn == 15) {
7d1b0095 9308 addr = tcg_temp_new_i32();
b0109805 9309 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9310 } else {
b0109805 9311 addr = load_reg(s, rn);
9ee6e8bb 9312 }
b26eefb6 9313 tmp = load_reg(s, rm);
b0109805 9314 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9315 if (insn & (1 << 4)) {
9316 /* tbh */
b0109805 9317 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9318 tcg_temp_free_i32(tmp);
e2592fad 9319 tmp = tcg_temp_new_i32();
6ce2faf4 9320 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9321 } else { /* tbb */
7d1b0095 9322 tcg_temp_free_i32(tmp);
e2592fad 9323 tmp = tcg_temp_new_i32();
6ce2faf4 9324 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9325 }
7d1b0095 9326 tcg_temp_free_i32(addr);
b0109805
PB
9327 tcg_gen_shli_i32(tmp, tmp, 1);
9328 tcg_gen_addi_i32(tmp, tmp, s->pc);
9329 store_reg(s, 15, tmp);
9ee6e8bb 9330 } else {
2359bf80 9331 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9332 op = (insn >> 4) & 0x3;
2359bf80
MR
9333 switch (op2) {
9334 case 0:
426f5abc 9335 goto illegal_op;
2359bf80
MR
9336 case 1:
9337 /* Load/store exclusive byte/halfword/doubleword */
9338 if (op == 2) {
9339 goto illegal_op;
9340 }
9341 ARCH(7);
9342 break;
9343 case 2:
9344 /* Load-acquire/store-release */
9345 if (op == 3) {
9346 goto illegal_op;
9347 }
9348 /* Fall through */
9349 case 3:
9350 /* Load-acquire/store-release exclusive */
9351 ARCH(8);
9352 break;
426f5abc 9353 }
39d5492a 9354 addr = tcg_temp_local_new_i32();
98a46317 9355 load_reg_var(s, addr, rn);
2359bf80
MR
9356 if (!(op2 & 1)) {
9357 if (insn & (1 << 20)) {
9358 tmp = tcg_temp_new_i32();
9359 switch (op) {
9360 case 0: /* ldab */
6ce2faf4 9361 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9362 break;
9363 case 1: /* ldah */
6ce2faf4 9364 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9365 break;
9366 case 2: /* lda */
6ce2faf4 9367 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9368 break;
9369 default:
9370 abort();
9371 }
9372 store_reg(s, rs, tmp);
9373 } else {
9374 tmp = load_reg(s, rs);
9375 switch (op) {
9376 case 0: /* stlb */
6ce2faf4 9377 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9378 break;
9379 case 1: /* stlh */
6ce2faf4 9380 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9381 break;
9382 case 2: /* stl */
6ce2faf4 9383 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9384 break;
9385 default:
9386 abort();
9387 }
9388 tcg_temp_free_i32(tmp);
9389 }
9390 } else if (insn & (1 << 20)) {
426f5abc 9391 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9392 } else {
426f5abc 9393 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9394 }
39d5492a 9395 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9396 }
9397 } else {
9398 /* Load/store multiple, RFE, SRS. */
9399 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9400 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9401 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9402 goto illegal_op;
00115976 9403 }
9ee6e8bb
PB
9404 if (insn & (1 << 20)) {
9405 /* rfe */
b0109805
PB
9406 addr = load_reg(s, rn);
9407 if ((insn & (1 << 24)) == 0)
9408 tcg_gen_addi_i32(addr, addr, -8);
9409 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9410 tmp = tcg_temp_new_i32();
6ce2faf4 9411 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9412 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9413 tmp2 = tcg_temp_new_i32();
6ce2faf4 9414 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9415 if (insn & (1 << 21)) {
9416 /* Base writeback. */
b0109805
PB
9417 if (insn & (1 << 24)) {
9418 tcg_gen_addi_i32(addr, addr, 4);
9419 } else {
9420 tcg_gen_addi_i32(addr, addr, -4);
9421 }
9422 store_reg(s, rn, addr);
9423 } else {
7d1b0095 9424 tcg_temp_free_i32(addr);
9ee6e8bb 9425 }
b0109805 9426 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9427 } else {
9428 /* srs */
81465888
PM
9429 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9430 insn & (1 << 21));
9ee6e8bb
PB
9431 }
9432 } else {
5856d44e 9433 int i, loaded_base = 0;
39d5492a 9434 TCGv_i32 loaded_var;
9ee6e8bb 9435 /* Load/store multiple. */
b0109805 9436 addr = load_reg(s, rn);
9ee6e8bb
PB
9437 offset = 0;
9438 for (i = 0; i < 16; i++) {
9439 if (insn & (1 << i))
9440 offset += 4;
9441 }
9442 if (insn & (1 << 24)) {
b0109805 9443 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9444 }
9445
39d5492a 9446 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9447 for (i = 0; i < 16; i++) {
9448 if ((insn & (1 << i)) == 0)
9449 continue;
9450 if (insn & (1 << 20)) {
9451 /* Load. */
e2592fad 9452 tmp = tcg_temp_new_i32();
6ce2faf4 9453 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9454 if (i == 15) {
b0109805 9455 gen_bx(s, tmp);
5856d44e
YO
9456 } else if (i == rn) {
9457 loaded_var = tmp;
9458 loaded_base = 1;
9ee6e8bb 9459 } else {
b0109805 9460 store_reg(s, i, tmp);
9ee6e8bb
PB
9461 }
9462 } else {
9463 /* Store. */
b0109805 9464 tmp = load_reg(s, i);
6ce2faf4 9465 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9466 tcg_temp_free_i32(tmp);
9ee6e8bb 9467 }
b0109805 9468 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9469 }
5856d44e
YO
9470 if (loaded_base) {
9471 store_reg(s, rn, loaded_var);
9472 }
9ee6e8bb
PB
9473 if (insn & (1 << 21)) {
9474 /* Base register writeback. */
9475 if (insn & (1 << 24)) {
b0109805 9476 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9477 }
9478 /* Fault if writeback register is in register list. */
9479 if (insn & (1 << rn))
9480 goto illegal_op;
b0109805
PB
9481 store_reg(s, rn, addr);
9482 } else {
7d1b0095 9483 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9484 }
9485 }
9486 }
9487 break;
2af9ab77
JB
9488 case 5:
9489
9ee6e8bb 9490 op = (insn >> 21) & 0xf;
2af9ab77 9491 if (op == 6) {
62b44f05
AR
9492 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9493 goto illegal_op;
9494 }
2af9ab77
JB
9495 /* Halfword pack. */
9496 tmp = load_reg(s, rn);
9497 tmp2 = load_reg(s, rm);
9498 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9499 if (insn & (1 << 5)) {
9500 /* pkhtb */
9501 if (shift == 0)
9502 shift = 31;
9503 tcg_gen_sari_i32(tmp2, tmp2, shift);
9504 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9505 tcg_gen_ext16u_i32(tmp2, tmp2);
9506 } else {
9507 /* pkhbt */
9508 if (shift)
9509 tcg_gen_shli_i32(tmp2, tmp2, shift);
9510 tcg_gen_ext16u_i32(tmp, tmp);
9511 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9512 }
9513 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9514 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9515 store_reg(s, rd, tmp);
9516 } else {
2af9ab77
JB
9517 /* Data processing register constant shift. */
9518 if (rn == 15) {
7d1b0095 9519 tmp = tcg_temp_new_i32();
2af9ab77
JB
9520 tcg_gen_movi_i32(tmp, 0);
9521 } else {
9522 tmp = load_reg(s, rn);
9523 }
9524 tmp2 = load_reg(s, rm);
9525
9526 shiftop = (insn >> 4) & 3;
9527 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9528 conds = (insn & (1 << 20)) != 0;
9529 logic_cc = (conds && thumb2_logic_op(op));
9530 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9531 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9532 goto illegal_op;
7d1b0095 9533 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9534 if (rd != 15) {
9535 store_reg(s, rd, tmp);
9536 } else {
7d1b0095 9537 tcg_temp_free_i32(tmp);
2af9ab77 9538 }
3174f8e9 9539 }
9ee6e8bb
PB
9540 break;
9541 case 13: /* Misc data processing. */
9542 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9543 if (op < 4 && (insn & 0xf000) != 0xf000)
9544 goto illegal_op;
9545 switch (op) {
9546 case 0: /* Register controlled shift. */
8984bd2e
PB
9547 tmp = load_reg(s, rn);
9548 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9549 if ((insn & 0x70) != 0)
9550 goto illegal_op;
9551 op = (insn >> 21) & 3;
8984bd2e
PB
9552 logic_cc = (insn & (1 << 20)) != 0;
9553 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9554 if (logic_cc)
9555 gen_logic_CC(tmp);
7dcc1f89 9556 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9557 break;
9558 case 1: /* Sign/zero extend. */
62b44f05
AR
9559 op = (insn >> 20) & 7;
9560 switch (op) {
9561 case 0: /* SXTAH, SXTH */
9562 case 1: /* UXTAH, UXTH */
9563 case 4: /* SXTAB, SXTB */
9564 case 5: /* UXTAB, UXTB */
9565 break;
9566 case 2: /* SXTAB16, SXTB16 */
9567 case 3: /* UXTAB16, UXTB16 */
9568 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9569 goto illegal_op;
9570 }
9571 break;
9572 default:
9573 goto illegal_op;
9574 }
9575 if (rn != 15) {
9576 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9577 goto illegal_op;
9578 }
9579 }
5e3f878a 9580 tmp = load_reg(s, rm);
9ee6e8bb 9581 shift = (insn >> 4) & 3;
1301f322 9582 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9583 rotate, a shift is sufficient. */
9584 if (shift != 0)
f669df27 9585 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9586 op = (insn >> 20) & 7;
9587 switch (op) {
5e3f878a
PB
9588 case 0: gen_sxth(tmp); break;
9589 case 1: gen_uxth(tmp); break;
9590 case 2: gen_sxtb16(tmp); break;
9591 case 3: gen_uxtb16(tmp); break;
9592 case 4: gen_sxtb(tmp); break;
9593 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9594 default:
9595 g_assert_not_reached();
9ee6e8bb
PB
9596 }
9597 if (rn != 15) {
5e3f878a 9598 tmp2 = load_reg(s, rn);
9ee6e8bb 9599 if ((op >> 1) == 1) {
5e3f878a 9600 gen_add16(tmp, tmp2);
9ee6e8bb 9601 } else {
5e3f878a 9602 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9603 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9604 }
9605 }
5e3f878a 9606 store_reg(s, rd, tmp);
9ee6e8bb
PB
9607 break;
9608 case 2: /* SIMD add/subtract. */
62b44f05
AR
9609 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9610 goto illegal_op;
9611 }
9ee6e8bb
PB
9612 op = (insn >> 20) & 7;
9613 shift = (insn >> 4) & 7;
9614 if ((op & 3) == 3 || (shift & 3) == 3)
9615 goto illegal_op;
6ddbc6e4
PB
9616 tmp = load_reg(s, rn);
9617 tmp2 = load_reg(s, rm);
9618 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9619 tcg_temp_free_i32(tmp2);
6ddbc6e4 9620 store_reg(s, rd, tmp);
9ee6e8bb
PB
9621 break;
9622 case 3: /* Other data processing. */
9623 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9624 if (op < 4) {
9625 /* Saturating add/subtract. */
62b44f05
AR
9626 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9627 goto illegal_op;
9628 }
d9ba4830
PB
9629 tmp = load_reg(s, rn);
9630 tmp2 = load_reg(s, rm);
9ee6e8bb 9631 if (op & 1)
9ef39277 9632 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9633 if (op & 2)
9ef39277 9634 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9635 else
9ef39277 9636 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9637 tcg_temp_free_i32(tmp2);
9ee6e8bb 9638 } else {
62b44f05
AR
9639 switch (op) {
9640 case 0x0a: /* rbit */
9641 case 0x08: /* rev */
9642 case 0x09: /* rev16 */
9643 case 0x0b: /* revsh */
9644 case 0x18: /* clz */
9645 break;
9646 case 0x10: /* sel */
9647 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9648 goto illegal_op;
9649 }
9650 break;
9651 case 0x20: /* crc32/crc32c */
9652 case 0x21:
9653 case 0x22:
9654 case 0x28:
9655 case 0x29:
9656 case 0x2a:
9657 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
9658 goto illegal_op;
9659 }
9660 break;
9661 default:
9662 goto illegal_op;
9663 }
d9ba4830 9664 tmp = load_reg(s, rn);
9ee6e8bb
PB
9665 switch (op) {
9666 case 0x0a: /* rbit */
d9ba4830 9667 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9668 break;
9669 case 0x08: /* rev */
66896cb8 9670 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9671 break;
9672 case 0x09: /* rev16 */
d9ba4830 9673 gen_rev16(tmp);
9ee6e8bb
PB
9674 break;
9675 case 0x0b: /* revsh */
d9ba4830 9676 gen_revsh(tmp);
9ee6e8bb
PB
9677 break;
9678 case 0x10: /* sel */
d9ba4830 9679 tmp2 = load_reg(s, rm);
7d1b0095 9680 tmp3 = tcg_temp_new_i32();
0ecb72a5 9681 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9682 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9683 tcg_temp_free_i32(tmp3);
9684 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9685 break;
9686 case 0x18: /* clz */
d9ba4830 9687 gen_helper_clz(tmp, tmp);
9ee6e8bb 9688 break;
eb0ecd5a
WN
9689 case 0x20:
9690 case 0x21:
9691 case 0x22:
9692 case 0x28:
9693 case 0x29:
9694 case 0x2a:
9695 {
9696 /* crc32/crc32c */
9697 uint32_t sz = op & 0x3;
9698 uint32_t c = op & 0x8;
9699
eb0ecd5a 9700 tmp2 = load_reg(s, rm);
aa633469
PM
9701 if (sz == 0) {
9702 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9703 } else if (sz == 1) {
9704 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9705 }
eb0ecd5a
WN
9706 tmp3 = tcg_const_i32(1 << sz);
9707 if (c) {
9708 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9709 } else {
9710 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9711 }
9712 tcg_temp_free_i32(tmp2);
9713 tcg_temp_free_i32(tmp3);
9714 break;
9715 }
9ee6e8bb 9716 default:
62b44f05 9717 g_assert_not_reached();
9ee6e8bb
PB
9718 }
9719 }
d9ba4830 9720 store_reg(s, rd, tmp);
9ee6e8bb
PB
9721 break;
9722 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
9723 switch ((insn >> 20) & 7) {
9724 case 0: /* 32 x 32 -> 32 */
9725 case 7: /* Unsigned sum of absolute differences. */
9726 break;
9727 case 1: /* 16 x 16 -> 32 */
9728 case 2: /* Dual multiply add. */
9729 case 3: /* 32 * 16 -> 32msb */
9730 case 4: /* Dual multiply subtract. */
9731 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9732 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9733 goto illegal_op;
9734 }
9735 break;
9736 }
9ee6e8bb 9737 op = (insn >> 4) & 0xf;
d9ba4830
PB
9738 tmp = load_reg(s, rn);
9739 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9740 switch ((insn >> 20) & 7) {
9741 case 0: /* 32 x 32 -> 32 */
d9ba4830 9742 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9743 tcg_temp_free_i32(tmp2);
9ee6e8bb 9744 if (rs != 15) {
d9ba4830 9745 tmp2 = load_reg(s, rs);
9ee6e8bb 9746 if (op)
d9ba4830 9747 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9748 else
d9ba4830 9749 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9750 tcg_temp_free_i32(tmp2);
9ee6e8bb 9751 }
9ee6e8bb
PB
9752 break;
9753 case 1: /* 16 x 16 -> 32 */
d9ba4830 9754 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9755 tcg_temp_free_i32(tmp2);
9ee6e8bb 9756 if (rs != 15) {
d9ba4830 9757 tmp2 = load_reg(s, rs);
9ef39277 9758 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9759 tcg_temp_free_i32(tmp2);
9ee6e8bb 9760 }
9ee6e8bb
PB
9761 break;
9762 case 2: /* Dual multiply add. */
9763 case 4: /* Dual multiply subtract. */
9764 if (op)
d9ba4830
PB
9765 gen_swap_half(tmp2);
9766 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9767 if (insn & (1 << 22)) {
e1d177b9 9768 /* This subtraction cannot overflow. */
d9ba4830 9769 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9770 } else {
e1d177b9
PM
9771 /* This addition cannot overflow 32 bits;
9772 * however it may overflow considered as a signed
9773 * operation, in which case we must set the Q flag.
9774 */
9ef39277 9775 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9776 }
7d1b0095 9777 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9778 if (rs != 15)
9779 {
d9ba4830 9780 tmp2 = load_reg(s, rs);
9ef39277 9781 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9782 tcg_temp_free_i32(tmp2);
9ee6e8bb 9783 }
9ee6e8bb
PB
9784 break;
9785 case 3: /* 32 * 16 -> 32msb */
9786 if (op)
d9ba4830 9787 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9788 else
d9ba4830 9789 gen_sxth(tmp2);
a7812ae4
PB
9790 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9791 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9792 tmp = tcg_temp_new_i32();
ecc7b3aa 9793 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 9794 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9795 if (rs != 15)
9796 {
d9ba4830 9797 tmp2 = load_reg(s, rs);
9ef39277 9798 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9799 tcg_temp_free_i32(tmp2);
9ee6e8bb 9800 }
9ee6e8bb 9801 break;
838fa72d
AJ
9802 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9803 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9804 if (rs != 15) {
838fa72d
AJ
9805 tmp = load_reg(s, rs);
9806 if (insn & (1 << 20)) {
9807 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9808 } else {
838fa72d 9809 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9810 }
2c0262af 9811 }
838fa72d
AJ
9812 if (insn & (1 << 4)) {
9813 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9814 }
9815 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9816 tmp = tcg_temp_new_i32();
ecc7b3aa 9817 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9818 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9819 break;
9820 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9821 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9822 tcg_temp_free_i32(tmp2);
9ee6e8bb 9823 if (rs != 15) {
d9ba4830
PB
9824 tmp2 = load_reg(s, rs);
9825 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9826 tcg_temp_free_i32(tmp2);
5fd46862 9827 }
9ee6e8bb 9828 break;
2c0262af 9829 }
d9ba4830 9830 store_reg(s, rd, tmp);
2c0262af 9831 break;
9ee6e8bb
PB
9832 case 6: case 7: /* 64-bit multiply, Divide. */
9833 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9834 tmp = load_reg(s, rn);
9835 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9836 if ((op & 0x50) == 0x10) {
9837 /* sdiv, udiv */
d614a513 9838 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9839 goto illegal_op;
47789990 9840 }
9ee6e8bb 9841 if (op & 0x20)
5e3f878a 9842 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9843 else
5e3f878a 9844 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9845 tcg_temp_free_i32(tmp2);
5e3f878a 9846 store_reg(s, rd, tmp);
9ee6e8bb
PB
9847 } else if ((op & 0xe) == 0xc) {
9848 /* Dual multiply accumulate long. */
62b44f05
AR
9849 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9850 tcg_temp_free_i32(tmp);
9851 tcg_temp_free_i32(tmp2);
9852 goto illegal_op;
9853 }
9ee6e8bb 9854 if (op & 1)
5e3f878a
PB
9855 gen_swap_half(tmp2);
9856 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9857 if (op & 0x10) {
5e3f878a 9858 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9859 } else {
5e3f878a 9860 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9861 }
7d1b0095 9862 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9863 /* BUGFIX */
9864 tmp64 = tcg_temp_new_i64();
9865 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9866 tcg_temp_free_i32(tmp);
a7812ae4
PB
9867 gen_addq(s, tmp64, rs, rd);
9868 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9869 tcg_temp_free_i64(tmp64);
2c0262af 9870 } else {
9ee6e8bb
PB
9871 if (op & 0x20) {
9872 /* Unsigned 64-bit multiply */
a7812ae4 9873 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9874 } else {
9ee6e8bb
PB
9875 if (op & 8) {
9876 /* smlalxy */
62b44f05
AR
9877 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9878 tcg_temp_free_i32(tmp2);
9879 tcg_temp_free_i32(tmp);
9880 goto illegal_op;
9881 }
5e3f878a 9882 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9883 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9884 tmp64 = tcg_temp_new_i64();
9885 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9886 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9887 } else {
9888 /* Signed 64-bit multiply */
a7812ae4 9889 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9890 }
b5ff1b31 9891 }
9ee6e8bb
PB
9892 if (op & 4) {
9893 /* umaal */
62b44f05
AR
9894 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9895 tcg_temp_free_i64(tmp64);
9896 goto illegal_op;
9897 }
a7812ae4
PB
9898 gen_addq_lo(s, tmp64, rs);
9899 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9900 } else if (op & 0x40) {
9901 /* 64-bit accumulate. */
a7812ae4 9902 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9903 }
a7812ae4 9904 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9905 tcg_temp_free_i64(tmp64);
5fd46862 9906 }
2c0262af 9907 break;
9ee6e8bb
PB
9908 }
9909 break;
9910 case 6: case 7: case 14: case 15:
9911 /* Coprocessor. */
9912 if (((insn >> 24) & 3) == 3) {
9913 /* Translate into the equivalent ARM encoding. */
f06053e3 9914 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 9915 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9916 goto illegal_op;
7dcc1f89 9917 }
6a57f3eb 9918 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 9919 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9920 goto illegal_op;
9921 }
9ee6e8bb
PB
9922 } else {
9923 if (insn & (1 << 28))
9924 goto illegal_op;
7dcc1f89 9925 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 9926 goto illegal_op;
7dcc1f89 9927 }
9ee6e8bb
PB
9928 }
9929 break;
9930 case 8: case 9: case 10: case 11:
9931 if (insn & (1 << 15)) {
9932 /* Branches, misc control. */
9933 if (insn & 0x5000) {
9934 /* Unconditional branch. */
9935 /* signextend(hw1[10:0]) -> offset[:12]. */
9936 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9937 /* hw1[10:0] -> offset[11:1]. */
9938 offset |= (insn & 0x7ff) << 1;
9939 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9940 offset[24:22] already have the same value because of the
9941 sign extension above. */
9942 offset ^= ((~insn) & (1 << 13)) << 10;
9943 offset ^= ((~insn) & (1 << 11)) << 11;
9944
9ee6e8bb
PB
9945 if (insn & (1 << 14)) {
9946 /* Branch and link. */
3174f8e9 9947 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9948 }
3b46e624 9949
b0109805 9950 offset += s->pc;
9ee6e8bb
PB
9951 if (insn & (1 << 12)) {
9952 /* b/bl */
b0109805 9953 gen_jmp(s, offset);
9ee6e8bb
PB
9954 } else {
9955 /* blx */
b0109805 9956 offset &= ~(uint32_t)2;
be5e7a76 9957 /* thumb2 bx, no need to check */
b0109805 9958 gen_bx_im(s, offset);
2c0262af 9959 }
9ee6e8bb
PB
9960 } else if (((insn >> 23) & 7) == 7) {
9961 /* Misc control */
9962 if (insn & (1 << 13))
9963 goto illegal_op;
9964
9965 if (insn & (1 << 26)) {
37e6456e
PM
9966 if (!(insn & (1 << 20))) {
9967 /* Hypervisor call (v7) */
9968 int imm16 = extract32(insn, 16, 4) << 12
9969 | extract32(insn, 0, 12);
9970 ARCH(7);
9971 if (IS_USER(s)) {
9972 goto illegal_op;
9973 }
9974 gen_hvc(s, imm16);
9975 } else {
9976 /* Secure monitor call (v6+) */
9977 ARCH(6K);
9978 if (IS_USER(s)) {
9979 goto illegal_op;
9980 }
9981 gen_smc(s);
9982 }
2c0262af 9983 } else {
9ee6e8bb
PB
9984 op = (insn >> 20) & 7;
9985 switch (op) {
9986 case 0: /* msr cpsr. */
b53d8923 9987 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
9988 tmp = load_reg(s, rn);
9989 addr = tcg_const_i32(insn & 0xff);
9990 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9991 tcg_temp_free_i32(addr);
7d1b0095 9992 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9993 gen_lookup_tb(s);
9994 break;
9995 }
9996 /* fall through */
9997 case 1: /* msr spsr. */
b53d8923 9998 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9999 goto illegal_op;
b53d8923 10000 }
2fbac54b
FN
10001 tmp = load_reg(s, rn);
10002 if (gen_set_psr(s,
7dcc1f89 10003 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10004 op == 1, tmp))
9ee6e8bb
PB
10005 goto illegal_op;
10006 break;
10007 case 2: /* cps, nop-hint. */
10008 if (((insn >> 8) & 7) == 0) {
10009 gen_nop_hint(s, insn & 0xff);
10010 }
10011 /* Implemented as NOP in user mode. */
10012 if (IS_USER(s))
10013 break;
10014 offset = 0;
10015 imm = 0;
10016 if (insn & (1 << 10)) {
10017 if (insn & (1 << 7))
10018 offset |= CPSR_A;
10019 if (insn & (1 << 6))
10020 offset |= CPSR_I;
10021 if (insn & (1 << 5))
10022 offset |= CPSR_F;
10023 if (insn & (1 << 9))
10024 imm = CPSR_A | CPSR_I | CPSR_F;
10025 }
10026 if (insn & (1 << 8)) {
10027 offset |= 0x1f;
10028 imm |= (insn & 0x1f);
10029 }
10030 if (offset) {
2fbac54b 10031 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10032 }
10033 break;
10034 case 3: /* Special control operations. */
426f5abc 10035 ARCH(7);
9ee6e8bb
PB
10036 op = (insn >> 4) & 0xf;
10037 switch (op) {
10038 case 2: /* clrex */
426f5abc 10039 gen_clrex(s);
9ee6e8bb
PB
10040 break;
10041 case 4: /* dsb */
10042 case 5: /* dmb */
9ee6e8bb 10043 /* These execute as NOPs. */
9ee6e8bb 10044 break;
6df99dec
SS
10045 case 6: /* isb */
10046 /* We need to break the TB after this insn
10047 * to execute self-modifying code correctly
10048 * and also to take any pending interrupts
10049 * immediately.
10050 */
10051 gen_lookup_tb(s);
10052 break;
9ee6e8bb
PB
10053 default:
10054 goto illegal_op;
10055 }
10056 break;
10057 case 4: /* bxj */
10058 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10059 tmp = load_reg(s, rn);
10060 gen_bx(s, tmp);
9ee6e8bb
PB
10061 break;
10062 case 5: /* Exception return. */
b8b45b68
RV
10063 if (IS_USER(s)) {
10064 goto illegal_op;
10065 }
10066 if (rn != 14 || rd != 15) {
10067 goto illegal_op;
10068 }
10069 tmp = load_reg(s, rn);
10070 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10071 gen_exception_return(s, tmp);
10072 break;
9ee6e8bb 10073 case 6: /* mrs cpsr. */
7d1b0095 10074 tmp = tcg_temp_new_i32();
b53d8923 10075 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10076 addr = tcg_const_i32(insn & 0xff);
10077 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10078 tcg_temp_free_i32(addr);
9ee6e8bb 10079 } else {
9ef39277 10080 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10081 }
8984bd2e 10082 store_reg(s, rd, tmp);
9ee6e8bb
PB
10083 break;
10084 case 7: /* mrs spsr. */
10085 /* Not accessible in user mode. */
b53d8923 10086 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10087 goto illegal_op;
b53d8923 10088 }
d9ba4830
PB
10089 tmp = load_cpu_field(spsr);
10090 store_reg(s, rd, tmp);
9ee6e8bb 10091 break;
2c0262af
FB
10092 }
10093 }
9ee6e8bb
PB
10094 } else {
10095 /* Conditional branch. */
10096 op = (insn >> 22) & 0xf;
10097 /* Generate a conditional jump to next instruction. */
10098 s->condlabel = gen_new_label();
39fb730a 10099 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10100 s->condjmp = 1;
10101
10102 /* offset[11:1] = insn[10:0] */
10103 offset = (insn & 0x7ff) << 1;
10104 /* offset[17:12] = insn[21:16]. */
10105 offset |= (insn & 0x003f0000) >> 4;
10106 /* offset[31:20] = insn[26]. */
10107 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10108 /* offset[18] = insn[13]. */
10109 offset |= (insn & (1 << 13)) << 5;
10110 /* offset[19] = insn[11]. */
10111 offset |= (insn & (1 << 11)) << 8;
10112
10113 /* jump to the offset */
b0109805 10114 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10115 }
10116 } else {
10117 /* Data processing immediate. */
10118 if (insn & (1 << 25)) {
10119 if (insn & (1 << 24)) {
10120 if (insn & (1 << 20))
10121 goto illegal_op;
10122 /* Bitfield/Saturate. */
10123 op = (insn >> 21) & 7;
10124 imm = insn & 0x1f;
10125 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10126 if (rn == 15) {
7d1b0095 10127 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10128 tcg_gen_movi_i32(tmp, 0);
10129 } else {
10130 tmp = load_reg(s, rn);
10131 }
9ee6e8bb
PB
10132 switch (op) {
10133 case 2: /* Signed bitfield extract. */
10134 imm++;
10135 if (shift + imm > 32)
10136 goto illegal_op;
10137 if (imm < 32)
6ddbc6e4 10138 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10139 break;
10140 case 6: /* Unsigned bitfield extract. */
10141 imm++;
10142 if (shift + imm > 32)
10143 goto illegal_op;
10144 if (imm < 32)
6ddbc6e4 10145 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10146 break;
10147 case 3: /* Bitfield insert/clear. */
10148 if (imm < shift)
10149 goto illegal_op;
10150 imm = imm + 1 - shift;
10151 if (imm != 32) {
6ddbc6e4 10152 tmp2 = load_reg(s, rd);
d593c48e 10153 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10154 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10155 }
10156 break;
10157 case 7:
10158 goto illegal_op;
10159 default: /* Saturate. */
9ee6e8bb
PB
10160 if (shift) {
10161 if (op & 1)
6ddbc6e4 10162 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10163 else
6ddbc6e4 10164 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10165 }
6ddbc6e4 10166 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10167 if (op & 4) {
10168 /* Unsigned. */
62b44f05
AR
10169 if ((op & 1) && shift == 0) {
10170 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10171 tcg_temp_free_i32(tmp);
10172 tcg_temp_free_i32(tmp2);
10173 goto illegal_op;
10174 }
9ef39277 10175 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10176 } else {
9ef39277 10177 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10178 }
2c0262af 10179 } else {
9ee6e8bb 10180 /* Signed. */
62b44f05
AR
10181 if ((op & 1) && shift == 0) {
10182 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10183 tcg_temp_free_i32(tmp);
10184 tcg_temp_free_i32(tmp2);
10185 goto illegal_op;
10186 }
9ef39277 10187 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10188 } else {
9ef39277 10189 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10190 }
2c0262af 10191 }
b75263d6 10192 tcg_temp_free_i32(tmp2);
9ee6e8bb 10193 break;
2c0262af 10194 }
6ddbc6e4 10195 store_reg(s, rd, tmp);
9ee6e8bb
PB
10196 } else {
10197 imm = ((insn & 0x04000000) >> 15)
10198 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10199 if (insn & (1 << 22)) {
10200 /* 16-bit immediate. */
10201 imm |= (insn >> 4) & 0xf000;
10202 if (insn & (1 << 23)) {
10203 /* movt */
5e3f878a 10204 tmp = load_reg(s, rd);
86831435 10205 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10206 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10207 } else {
9ee6e8bb 10208 /* movw */
7d1b0095 10209 tmp = tcg_temp_new_i32();
5e3f878a 10210 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10211 }
10212 } else {
9ee6e8bb
PB
10213 /* Add/sub 12-bit immediate. */
10214 if (rn == 15) {
b0109805 10215 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10216 if (insn & (1 << 23))
b0109805 10217 offset -= imm;
9ee6e8bb 10218 else
b0109805 10219 offset += imm;
7d1b0095 10220 tmp = tcg_temp_new_i32();
5e3f878a 10221 tcg_gen_movi_i32(tmp, offset);
2c0262af 10222 } else {
5e3f878a 10223 tmp = load_reg(s, rn);
9ee6e8bb 10224 if (insn & (1 << 23))
5e3f878a 10225 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10226 else
5e3f878a 10227 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10228 }
9ee6e8bb 10229 }
5e3f878a 10230 store_reg(s, rd, tmp);
191abaa2 10231 }
9ee6e8bb
PB
10232 } else {
10233 int shifter_out = 0;
10234 /* modified 12-bit immediate. */
10235 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10236 imm = (insn & 0xff);
10237 switch (shift) {
10238 case 0: /* XY */
10239 /* Nothing to do. */
10240 break;
10241 case 1: /* 00XY00XY */
10242 imm |= imm << 16;
10243 break;
10244 case 2: /* XY00XY00 */
10245 imm |= imm << 16;
10246 imm <<= 8;
10247 break;
10248 case 3: /* XYXYXYXY */
10249 imm |= imm << 16;
10250 imm |= imm << 8;
10251 break;
10252 default: /* Rotated constant. */
10253 shift = (shift << 1) | (imm >> 7);
10254 imm |= 0x80;
10255 imm = imm << (32 - shift);
10256 shifter_out = 1;
10257 break;
b5ff1b31 10258 }
7d1b0095 10259 tmp2 = tcg_temp_new_i32();
3174f8e9 10260 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10261 rn = (insn >> 16) & 0xf;
3174f8e9 10262 if (rn == 15) {
7d1b0095 10263 tmp = tcg_temp_new_i32();
3174f8e9
FN
10264 tcg_gen_movi_i32(tmp, 0);
10265 } else {
10266 tmp = load_reg(s, rn);
10267 }
9ee6e8bb
PB
10268 op = (insn >> 21) & 0xf;
10269 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10270 shifter_out, tmp, tmp2))
9ee6e8bb 10271 goto illegal_op;
7d1b0095 10272 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10273 rd = (insn >> 8) & 0xf;
10274 if (rd != 15) {
3174f8e9
FN
10275 store_reg(s, rd, tmp);
10276 } else {
7d1b0095 10277 tcg_temp_free_i32(tmp);
2c0262af 10278 }
2c0262af 10279 }
9ee6e8bb
PB
10280 }
10281 break;
10282 case 12: /* Load/store single data item. */
10283 {
10284 int postinc = 0;
10285 int writeback = 0;
a99caa48 10286 int memidx;
9ee6e8bb 10287 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10288 if (disas_neon_ls_insn(s, insn)) {
c1713132 10289 goto illegal_op;
7dcc1f89 10290 }
9ee6e8bb
PB
10291 break;
10292 }
a2fdc890
PM
10293 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10294 if (rs == 15) {
10295 if (!(insn & (1 << 20))) {
10296 goto illegal_op;
10297 }
10298 if (op != 2) {
10299 /* Byte or halfword load space with dest == r15 : memory hints.
10300 * Catch them early so we don't emit pointless addressing code.
10301 * This space is a mix of:
10302 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10303 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10304 * cores)
10305 * unallocated hints, which must be treated as NOPs
10306 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10307 * which is easiest for the decoding logic
10308 * Some space which must UNDEF
10309 */
10310 int op1 = (insn >> 23) & 3;
10311 int op2 = (insn >> 6) & 0x3f;
10312 if (op & 2) {
10313 goto illegal_op;
10314 }
10315 if (rn == 15) {
02afbf64
PM
10316 /* UNPREDICTABLE, unallocated hint or
10317 * PLD/PLDW/PLI (literal)
10318 */
a2fdc890
PM
10319 return 0;
10320 }
10321 if (op1 & 1) {
02afbf64 10322 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10323 }
10324 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10325 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10326 }
10327 /* UNDEF space, or an UNPREDICTABLE */
10328 return 1;
10329 }
10330 }
a99caa48 10331 memidx = get_mem_index(s);
9ee6e8bb 10332 if (rn == 15) {
7d1b0095 10333 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10334 /* PC relative. */
10335 /* s->pc has already been incremented by 4. */
10336 imm = s->pc & 0xfffffffc;
10337 if (insn & (1 << 23))
10338 imm += insn & 0xfff;
10339 else
10340 imm -= insn & 0xfff;
b0109805 10341 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10342 } else {
b0109805 10343 addr = load_reg(s, rn);
9ee6e8bb
PB
10344 if (insn & (1 << 23)) {
10345 /* Positive offset. */
10346 imm = insn & 0xfff;
b0109805 10347 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10348 } else {
9ee6e8bb 10349 imm = insn & 0xff;
2a0308c5
PM
10350 switch ((insn >> 8) & 0xf) {
10351 case 0x0: /* Shifted Register. */
9ee6e8bb 10352 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10353 if (shift > 3) {
10354 tcg_temp_free_i32(addr);
18c9b560 10355 goto illegal_op;
2a0308c5 10356 }
b26eefb6 10357 tmp = load_reg(s, rm);
9ee6e8bb 10358 if (shift)
b26eefb6 10359 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10360 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10361 tcg_temp_free_i32(tmp);
9ee6e8bb 10362 break;
2a0308c5 10363 case 0xc: /* Negative offset. */
b0109805 10364 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10365 break;
2a0308c5 10366 case 0xe: /* User privilege. */
b0109805 10367 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10368 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10369 break;
2a0308c5 10370 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10371 imm = -imm;
10372 /* Fall through. */
2a0308c5 10373 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10374 postinc = 1;
10375 writeback = 1;
10376 break;
2a0308c5 10377 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10378 imm = -imm;
10379 /* Fall through. */
2a0308c5 10380 case 0xf: /* Pre-increment. */
b0109805 10381 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10382 writeback = 1;
10383 break;
10384 default:
2a0308c5 10385 tcg_temp_free_i32(addr);
b7bcbe95 10386 goto illegal_op;
9ee6e8bb
PB
10387 }
10388 }
10389 }
9ee6e8bb
PB
10390 if (insn & (1 << 20)) {
10391 /* Load. */
5a839c0d 10392 tmp = tcg_temp_new_i32();
a2fdc890 10393 switch (op) {
5a839c0d 10394 case 0:
a99caa48 10395 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10396 break;
10397 case 4:
a99caa48 10398 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10399 break;
10400 case 1:
a99caa48 10401 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10402 break;
10403 case 5:
a99caa48 10404 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10405 break;
10406 case 2:
a99caa48 10407 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10408 break;
2a0308c5 10409 default:
5a839c0d 10410 tcg_temp_free_i32(tmp);
2a0308c5
PM
10411 tcg_temp_free_i32(addr);
10412 goto illegal_op;
a2fdc890
PM
10413 }
10414 if (rs == 15) {
10415 gen_bx(s, tmp);
9ee6e8bb 10416 } else {
a2fdc890 10417 store_reg(s, rs, tmp);
9ee6e8bb
PB
10418 }
10419 } else {
10420 /* Store. */
b0109805 10421 tmp = load_reg(s, rs);
9ee6e8bb 10422 switch (op) {
5a839c0d 10423 case 0:
a99caa48 10424 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10425 break;
10426 case 1:
a99caa48 10427 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10428 break;
10429 case 2:
a99caa48 10430 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10431 break;
2a0308c5 10432 default:
5a839c0d 10433 tcg_temp_free_i32(tmp);
2a0308c5
PM
10434 tcg_temp_free_i32(addr);
10435 goto illegal_op;
b7bcbe95 10436 }
5a839c0d 10437 tcg_temp_free_i32(tmp);
2c0262af 10438 }
9ee6e8bb 10439 if (postinc)
b0109805
PB
10440 tcg_gen_addi_i32(addr, addr, imm);
10441 if (writeback) {
10442 store_reg(s, rn, addr);
10443 } else {
7d1b0095 10444 tcg_temp_free_i32(addr);
b0109805 10445 }
9ee6e8bb
PB
10446 }
10447 break;
10448 default:
10449 goto illegal_op;
2c0262af 10450 }
9ee6e8bb
PB
10451 return 0;
10452illegal_op:
10453 return 1;
2c0262af
FB
10454}
10455
0ecb72a5 10456static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10457{
10458 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10459 int32_t offset;
10460 int i;
39d5492a
PM
10461 TCGv_i32 tmp;
10462 TCGv_i32 tmp2;
10463 TCGv_i32 addr;
99c475ab 10464
9ee6e8bb
PB
10465 if (s->condexec_mask) {
10466 cond = s->condexec_cond;
bedd2912
JB
10467 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10468 s->condlabel = gen_new_label();
39fb730a 10469 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10470 s->condjmp = 1;
10471 }
9ee6e8bb
PB
10472 }
10473
d31dd73e 10474 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 10475 s->pc += 2;
b5ff1b31 10476
99c475ab
FB
10477 switch (insn >> 12) {
10478 case 0: case 1:
396e467c 10479
99c475ab
FB
10480 rd = insn & 7;
10481 op = (insn >> 11) & 3;
10482 if (op == 3) {
10483 /* add/subtract */
10484 rn = (insn >> 3) & 7;
396e467c 10485 tmp = load_reg(s, rn);
99c475ab
FB
10486 if (insn & (1 << 10)) {
10487 /* immediate */
7d1b0095 10488 tmp2 = tcg_temp_new_i32();
396e467c 10489 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10490 } else {
10491 /* reg */
10492 rm = (insn >> 6) & 7;
396e467c 10493 tmp2 = load_reg(s, rm);
99c475ab 10494 }
9ee6e8bb
PB
10495 if (insn & (1 << 9)) {
10496 if (s->condexec_mask)
396e467c 10497 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10498 else
72485ec4 10499 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10500 } else {
10501 if (s->condexec_mask)
396e467c 10502 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10503 else
72485ec4 10504 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10505 }
7d1b0095 10506 tcg_temp_free_i32(tmp2);
396e467c 10507 store_reg(s, rd, tmp);
99c475ab
FB
10508 } else {
10509 /* shift immediate */
10510 rm = (insn >> 3) & 7;
10511 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10512 tmp = load_reg(s, rm);
10513 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10514 if (!s->condexec_mask)
10515 gen_logic_CC(tmp);
10516 store_reg(s, rd, tmp);
99c475ab
FB
10517 }
10518 break;
10519 case 2: case 3:
10520 /* arithmetic large immediate */
10521 op = (insn >> 11) & 3;
10522 rd = (insn >> 8) & 0x7;
396e467c 10523 if (op == 0) { /* mov */
7d1b0095 10524 tmp = tcg_temp_new_i32();
396e467c 10525 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10526 if (!s->condexec_mask)
396e467c
FN
10527 gen_logic_CC(tmp);
10528 store_reg(s, rd, tmp);
10529 } else {
10530 tmp = load_reg(s, rd);
7d1b0095 10531 tmp2 = tcg_temp_new_i32();
396e467c
FN
10532 tcg_gen_movi_i32(tmp2, insn & 0xff);
10533 switch (op) {
10534 case 1: /* cmp */
72485ec4 10535 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10536 tcg_temp_free_i32(tmp);
10537 tcg_temp_free_i32(tmp2);
396e467c
FN
10538 break;
10539 case 2: /* add */
10540 if (s->condexec_mask)
10541 tcg_gen_add_i32(tmp, tmp, tmp2);
10542 else
72485ec4 10543 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10544 tcg_temp_free_i32(tmp2);
396e467c
FN
10545 store_reg(s, rd, tmp);
10546 break;
10547 case 3: /* sub */
10548 if (s->condexec_mask)
10549 tcg_gen_sub_i32(tmp, tmp, tmp2);
10550 else
72485ec4 10551 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10552 tcg_temp_free_i32(tmp2);
396e467c
FN
10553 store_reg(s, rd, tmp);
10554 break;
10555 }
99c475ab 10556 }
99c475ab
FB
10557 break;
10558 case 4:
10559 if (insn & (1 << 11)) {
10560 rd = (insn >> 8) & 7;
5899f386
FB
10561 /* load pc-relative. Bit 1 of PC is ignored. */
10562 val = s->pc + 2 + ((insn & 0xff) * 4);
10563 val &= ~(uint32_t)2;
7d1b0095 10564 addr = tcg_temp_new_i32();
b0109805 10565 tcg_gen_movi_i32(addr, val);
c40c8556 10566 tmp = tcg_temp_new_i32();
6ce2faf4 10567 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7d1b0095 10568 tcg_temp_free_i32(addr);
b0109805 10569 store_reg(s, rd, tmp);
99c475ab
FB
10570 break;
10571 }
10572 if (insn & (1 << 10)) {
10573 /* data processing extended or blx */
10574 rd = (insn & 7) | ((insn >> 4) & 8);
10575 rm = (insn >> 3) & 0xf;
10576 op = (insn >> 8) & 3;
10577 switch (op) {
10578 case 0: /* add */
396e467c
FN
10579 tmp = load_reg(s, rd);
10580 tmp2 = load_reg(s, rm);
10581 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10582 tcg_temp_free_i32(tmp2);
396e467c 10583 store_reg(s, rd, tmp);
99c475ab
FB
10584 break;
10585 case 1: /* cmp */
396e467c
FN
10586 tmp = load_reg(s, rd);
10587 tmp2 = load_reg(s, rm);
72485ec4 10588 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10589 tcg_temp_free_i32(tmp2);
10590 tcg_temp_free_i32(tmp);
99c475ab
FB
10591 break;
10592 case 2: /* mov/cpy */
396e467c
FN
10593 tmp = load_reg(s, rm);
10594 store_reg(s, rd, tmp);
99c475ab
FB
10595 break;
10596 case 3:/* branch [and link] exchange thumb register */
b0109805 10597 tmp = load_reg(s, rm);
99c475ab 10598 if (insn & (1 << 7)) {
be5e7a76 10599 ARCH(5);
99c475ab 10600 val = (uint32_t)s->pc | 1;
7d1b0095 10601 tmp2 = tcg_temp_new_i32();
b0109805
PB
10602 tcg_gen_movi_i32(tmp2, val);
10603 store_reg(s, 14, tmp2);
99c475ab 10604 }
be5e7a76 10605 /* already thumb, no need to check */
d9ba4830 10606 gen_bx(s, tmp);
99c475ab
FB
10607 break;
10608 }
10609 break;
10610 }
10611
10612 /* data processing register */
10613 rd = insn & 7;
10614 rm = (insn >> 3) & 7;
10615 op = (insn >> 6) & 0xf;
10616 if (op == 2 || op == 3 || op == 4 || op == 7) {
10617 /* the shift/rotate ops want the operands backwards */
10618 val = rm;
10619 rm = rd;
10620 rd = val;
10621 val = 1;
10622 } else {
10623 val = 0;
10624 }
10625
396e467c 10626 if (op == 9) { /* neg */
7d1b0095 10627 tmp = tcg_temp_new_i32();
396e467c
FN
10628 tcg_gen_movi_i32(tmp, 0);
10629 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10630 tmp = load_reg(s, rd);
10631 } else {
39d5492a 10632 TCGV_UNUSED_I32(tmp);
396e467c 10633 }
99c475ab 10634
396e467c 10635 tmp2 = load_reg(s, rm);
5899f386 10636 switch (op) {
99c475ab 10637 case 0x0: /* and */
396e467c 10638 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10639 if (!s->condexec_mask)
396e467c 10640 gen_logic_CC(tmp);
99c475ab
FB
10641 break;
10642 case 0x1: /* eor */
396e467c 10643 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10644 if (!s->condexec_mask)
396e467c 10645 gen_logic_CC(tmp);
99c475ab
FB
10646 break;
10647 case 0x2: /* lsl */
9ee6e8bb 10648 if (s->condexec_mask) {
365af80e 10649 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10650 } else {
9ef39277 10651 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10652 gen_logic_CC(tmp2);
9ee6e8bb 10653 }
99c475ab
FB
10654 break;
10655 case 0x3: /* lsr */
9ee6e8bb 10656 if (s->condexec_mask) {
365af80e 10657 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10658 } else {
9ef39277 10659 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10660 gen_logic_CC(tmp2);
9ee6e8bb 10661 }
99c475ab
FB
10662 break;
10663 case 0x4: /* asr */
9ee6e8bb 10664 if (s->condexec_mask) {
365af80e 10665 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10666 } else {
9ef39277 10667 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10668 gen_logic_CC(tmp2);
9ee6e8bb 10669 }
99c475ab
FB
10670 break;
10671 case 0x5: /* adc */
49b4c31e 10672 if (s->condexec_mask) {
396e467c 10673 gen_adc(tmp, tmp2);
49b4c31e
RH
10674 } else {
10675 gen_adc_CC(tmp, tmp, tmp2);
10676 }
99c475ab
FB
10677 break;
10678 case 0x6: /* sbc */
2de68a49 10679 if (s->condexec_mask) {
396e467c 10680 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10681 } else {
10682 gen_sbc_CC(tmp, tmp, tmp2);
10683 }
99c475ab
FB
10684 break;
10685 case 0x7: /* ror */
9ee6e8bb 10686 if (s->condexec_mask) {
f669df27
AJ
10687 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10688 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10689 } else {
9ef39277 10690 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10691 gen_logic_CC(tmp2);
9ee6e8bb 10692 }
99c475ab
FB
10693 break;
10694 case 0x8: /* tst */
396e467c
FN
10695 tcg_gen_and_i32(tmp, tmp, tmp2);
10696 gen_logic_CC(tmp);
99c475ab 10697 rd = 16;
5899f386 10698 break;
99c475ab 10699 case 0x9: /* neg */
9ee6e8bb 10700 if (s->condexec_mask)
396e467c 10701 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10702 else
72485ec4 10703 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10704 break;
10705 case 0xa: /* cmp */
72485ec4 10706 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10707 rd = 16;
10708 break;
10709 case 0xb: /* cmn */
72485ec4 10710 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10711 rd = 16;
10712 break;
10713 case 0xc: /* orr */
396e467c 10714 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10715 if (!s->condexec_mask)
396e467c 10716 gen_logic_CC(tmp);
99c475ab
FB
10717 break;
10718 case 0xd: /* mul */
7b2919a0 10719 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10720 if (!s->condexec_mask)
396e467c 10721 gen_logic_CC(tmp);
99c475ab
FB
10722 break;
10723 case 0xe: /* bic */
f669df27 10724 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10725 if (!s->condexec_mask)
396e467c 10726 gen_logic_CC(tmp);
99c475ab
FB
10727 break;
10728 case 0xf: /* mvn */
396e467c 10729 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10730 if (!s->condexec_mask)
396e467c 10731 gen_logic_CC(tmp2);
99c475ab 10732 val = 1;
5899f386 10733 rm = rd;
99c475ab
FB
10734 break;
10735 }
10736 if (rd != 16) {
396e467c
FN
10737 if (val) {
10738 store_reg(s, rm, tmp2);
10739 if (op != 0xf)
7d1b0095 10740 tcg_temp_free_i32(tmp);
396e467c
FN
10741 } else {
10742 store_reg(s, rd, tmp);
7d1b0095 10743 tcg_temp_free_i32(tmp2);
396e467c
FN
10744 }
10745 } else {
7d1b0095
PM
10746 tcg_temp_free_i32(tmp);
10747 tcg_temp_free_i32(tmp2);
99c475ab
FB
10748 }
10749 break;
10750
10751 case 5:
10752 /* load/store register offset. */
10753 rd = insn & 7;
10754 rn = (insn >> 3) & 7;
10755 rm = (insn >> 6) & 7;
10756 op = (insn >> 9) & 7;
b0109805 10757 addr = load_reg(s, rn);
b26eefb6 10758 tmp = load_reg(s, rm);
b0109805 10759 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10760 tcg_temp_free_i32(tmp);
99c475ab 10761
c40c8556 10762 if (op < 3) { /* store */
b0109805 10763 tmp = load_reg(s, rd);
c40c8556
PM
10764 } else {
10765 tmp = tcg_temp_new_i32();
10766 }
99c475ab
FB
10767
10768 switch (op) {
10769 case 0: /* str */
6ce2faf4 10770 gen_aa32_st32(tmp, addr, get_mem_index(s));
99c475ab
FB
10771 break;
10772 case 1: /* strh */
6ce2faf4 10773 gen_aa32_st16(tmp, addr, get_mem_index(s));
99c475ab
FB
10774 break;
10775 case 2: /* strb */
6ce2faf4 10776 gen_aa32_st8(tmp, addr, get_mem_index(s));
99c475ab
FB
10777 break;
10778 case 3: /* ldrsb */
6ce2faf4 10779 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
99c475ab
FB
10780 break;
10781 case 4: /* ldr */
6ce2faf4 10782 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10783 break;
10784 case 5: /* ldrh */
6ce2faf4 10785 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
99c475ab
FB
10786 break;
10787 case 6: /* ldrb */
6ce2faf4 10788 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
99c475ab
FB
10789 break;
10790 case 7: /* ldrsh */
6ce2faf4 10791 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
99c475ab
FB
10792 break;
10793 }
c40c8556 10794 if (op >= 3) { /* load */
b0109805 10795 store_reg(s, rd, tmp);
c40c8556
PM
10796 } else {
10797 tcg_temp_free_i32(tmp);
10798 }
7d1b0095 10799 tcg_temp_free_i32(addr);
99c475ab
FB
10800 break;
10801
10802 case 6:
10803 /* load/store word immediate offset */
10804 rd = insn & 7;
10805 rn = (insn >> 3) & 7;
b0109805 10806 addr = load_reg(s, rn);
99c475ab 10807 val = (insn >> 4) & 0x7c;
b0109805 10808 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10809
10810 if (insn & (1 << 11)) {
10811 /* load */
c40c8556 10812 tmp = tcg_temp_new_i32();
6ce2faf4 10813 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10814 store_reg(s, rd, tmp);
99c475ab
FB
10815 } else {
10816 /* store */
b0109805 10817 tmp = load_reg(s, rd);
6ce2faf4 10818 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10819 tcg_temp_free_i32(tmp);
99c475ab 10820 }
7d1b0095 10821 tcg_temp_free_i32(addr);
99c475ab
FB
10822 break;
10823
10824 case 7:
10825 /* load/store byte immediate offset */
10826 rd = insn & 7;
10827 rn = (insn >> 3) & 7;
b0109805 10828 addr = load_reg(s, rn);
99c475ab 10829 val = (insn >> 6) & 0x1f;
b0109805 10830 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10831
10832 if (insn & (1 << 11)) {
10833 /* load */
c40c8556 10834 tmp = tcg_temp_new_i32();
6ce2faf4 10835 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
b0109805 10836 store_reg(s, rd, tmp);
99c475ab
FB
10837 } else {
10838 /* store */
b0109805 10839 tmp = load_reg(s, rd);
6ce2faf4 10840 gen_aa32_st8(tmp, addr, get_mem_index(s));
c40c8556 10841 tcg_temp_free_i32(tmp);
99c475ab 10842 }
7d1b0095 10843 tcg_temp_free_i32(addr);
99c475ab
FB
10844 break;
10845
10846 case 8:
10847 /* load/store halfword immediate offset */
10848 rd = insn & 7;
10849 rn = (insn >> 3) & 7;
b0109805 10850 addr = load_reg(s, rn);
99c475ab 10851 val = (insn >> 5) & 0x3e;
b0109805 10852 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10853
10854 if (insn & (1 << 11)) {
10855 /* load */
c40c8556 10856 tmp = tcg_temp_new_i32();
6ce2faf4 10857 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
b0109805 10858 store_reg(s, rd, tmp);
99c475ab
FB
10859 } else {
10860 /* store */
b0109805 10861 tmp = load_reg(s, rd);
6ce2faf4 10862 gen_aa32_st16(tmp, addr, get_mem_index(s));
c40c8556 10863 tcg_temp_free_i32(tmp);
99c475ab 10864 }
7d1b0095 10865 tcg_temp_free_i32(addr);
99c475ab
FB
10866 break;
10867
10868 case 9:
10869 /* load/store from stack */
10870 rd = (insn >> 8) & 7;
b0109805 10871 addr = load_reg(s, 13);
99c475ab 10872 val = (insn & 0xff) * 4;
b0109805 10873 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10874
10875 if (insn & (1 << 11)) {
10876 /* load */
c40c8556 10877 tmp = tcg_temp_new_i32();
6ce2faf4 10878 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10879 store_reg(s, rd, tmp);
99c475ab
FB
10880 } else {
10881 /* store */
b0109805 10882 tmp = load_reg(s, rd);
6ce2faf4 10883 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10884 tcg_temp_free_i32(tmp);
99c475ab 10885 }
7d1b0095 10886 tcg_temp_free_i32(addr);
99c475ab
FB
10887 break;
10888
10889 case 10:
10890 /* add to high reg */
10891 rd = (insn >> 8) & 7;
5899f386
FB
10892 if (insn & (1 << 11)) {
10893 /* SP */
5e3f878a 10894 tmp = load_reg(s, 13);
5899f386
FB
10895 } else {
10896 /* PC. bit 1 is ignored. */
7d1b0095 10897 tmp = tcg_temp_new_i32();
5e3f878a 10898 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10899 }
99c475ab 10900 val = (insn & 0xff) * 4;
5e3f878a
PB
10901 tcg_gen_addi_i32(tmp, tmp, val);
10902 store_reg(s, rd, tmp);
99c475ab
FB
10903 break;
10904
10905 case 11:
10906 /* misc */
10907 op = (insn >> 8) & 0xf;
10908 switch (op) {
10909 case 0:
10910 /* adjust stack pointer */
b26eefb6 10911 tmp = load_reg(s, 13);
99c475ab
FB
10912 val = (insn & 0x7f) * 4;
10913 if (insn & (1 << 7))
6a0d8a1d 10914 val = -(int32_t)val;
b26eefb6
PB
10915 tcg_gen_addi_i32(tmp, tmp, val);
10916 store_reg(s, 13, tmp);
99c475ab
FB
10917 break;
10918
9ee6e8bb
PB
10919 case 2: /* sign/zero extend. */
10920 ARCH(6);
10921 rd = insn & 7;
10922 rm = (insn >> 3) & 7;
b0109805 10923 tmp = load_reg(s, rm);
9ee6e8bb 10924 switch ((insn >> 6) & 3) {
b0109805
PB
10925 case 0: gen_sxth(tmp); break;
10926 case 1: gen_sxtb(tmp); break;
10927 case 2: gen_uxth(tmp); break;
10928 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10929 }
b0109805 10930 store_reg(s, rd, tmp);
9ee6e8bb 10931 break;
99c475ab
FB
10932 case 4: case 5: case 0xc: case 0xd:
10933 /* push/pop */
b0109805 10934 addr = load_reg(s, 13);
5899f386
FB
10935 if (insn & (1 << 8))
10936 offset = 4;
99c475ab 10937 else
5899f386
FB
10938 offset = 0;
10939 for (i = 0; i < 8; i++) {
10940 if (insn & (1 << i))
10941 offset += 4;
10942 }
10943 if ((insn & (1 << 11)) == 0) {
b0109805 10944 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10945 }
99c475ab
FB
10946 for (i = 0; i < 8; i++) {
10947 if (insn & (1 << i)) {
10948 if (insn & (1 << 11)) {
10949 /* pop */
c40c8556 10950 tmp = tcg_temp_new_i32();
6ce2faf4 10951 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10952 store_reg(s, i, tmp);
99c475ab
FB
10953 } else {
10954 /* push */
b0109805 10955 tmp = load_reg(s, i);
6ce2faf4 10956 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10957 tcg_temp_free_i32(tmp);
99c475ab 10958 }
5899f386 10959 /* advance to the next address. */
b0109805 10960 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10961 }
10962 }
39d5492a 10963 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10964 if (insn & (1 << 8)) {
10965 if (insn & (1 << 11)) {
10966 /* pop pc */
c40c8556 10967 tmp = tcg_temp_new_i32();
6ce2faf4 10968 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10969 /* don't set the pc until the rest of the instruction
10970 has completed */
10971 } else {
10972 /* push lr */
b0109805 10973 tmp = load_reg(s, 14);
6ce2faf4 10974 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10975 tcg_temp_free_i32(tmp);
99c475ab 10976 }
b0109805 10977 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10978 }
5899f386 10979 if ((insn & (1 << 11)) == 0) {
b0109805 10980 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10981 }
99c475ab 10982 /* write back the new stack pointer */
b0109805 10983 store_reg(s, 13, addr);
99c475ab 10984 /* set the new PC value */
be5e7a76 10985 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 10986 store_reg_from_load(s, 15, tmp);
be5e7a76 10987 }
99c475ab
FB
10988 break;
10989
9ee6e8bb
PB
10990 case 1: case 3: case 9: case 11: /* czb */
10991 rm = insn & 7;
d9ba4830 10992 tmp = load_reg(s, rm);
9ee6e8bb
PB
10993 s->condlabel = gen_new_label();
10994 s->condjmp = 1;
10995 if (insn & (1 << 11))
cb63669a 10996 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10997 else
cb63669a 10998 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10999 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11000 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11001 val = (uint32_t)s->pc + 2;
11002 val += offset;
11003 gen_jmp(s, val);
11004 break;
11005
11006 case 15: /* IT, nop-hint. */
11007 if ((insn & 0xf) == 0) {
11008 gen_nop_hint(s, (insn >> 4) & 0xf);
11009 break;
11010 }
11011 /* If Then. */
11012 s->condexec_cond = (insn >> 4) & 0xe;
11013 s->condexec_mask = insn & 0x1f;
11014 /* No actual code generated for this insn, just setup state. */
11015 break;
11016
06c949e6 11017 case 0xe: /* bkpt */
d4a2dc67
PM
11018 {
11019 int imm8 = extract32(insn, 0, 8);
be5e7a76 11020 ARCH(5);
73710361
GB
11021 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11022 default_exception_el(s));
06c949e6 11023 break;
d4a2dc67 11024 }
06c949e6 11025
9ee6e8bb
PB
11026 case 0xa: /* rev */
11027 ARCH(6);
11028 rn = (insn >> 3) & 0x7;
11029 rd = insn & 0x7;
b0109805 11030 tmp = load_reg(s, rn);
9ee6e8bb 11031 switch ((insn >> 6) & 3) {
66896cb8 11032 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11033 case 1: gen_rev16(tmp); break;
11034 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
11035 default: goto illegal_op;
11036 }
b0109805 11037 store_reg(s, rd, tmp);
9ee6e8bb
PB
11038 break;
11039
d9e028c1
PM
11040 case 6:
11041 switch ((insn >> 5) & 7) {
11042 case 2:
11043 /* setend */
11044 ARCH(6);
10962fd5
PM
11045 if (((insn >> 3) & 1) != s->bswap_code) {
11046 /* Dynamic endianness switching not implemented. */
e0c270d9 11047 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
11048 goto illegal_op;
11049 }
9ee6e8bb 11050 break;
d9e028c1
PM
11051 case 3:
11052 /* cps */
11053 ARCH(6);
11054 if (IS_USER(s)) {
11055 break;
8984bd2e 11056 }
b53d8923 11057 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11058 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11059 /* FAULTMASK */
11060 if (insn & 1) {
11061 addr = tcg_const_i32(19);
11062 gen_helper_v7m_msr(cpu_env, addr, tmp);
11063 tcg_temp_free_i32(addr);
11064 }
11065 /* PRIMASK */
11066 if (insn & 2) {
11067 addr = tcg_const_i32(16);
11068 gen_helper_v7m_msr(cpu_env, addr, tmp);
11069 tcg_temp_free_i32(addr);
11070 }
11071 tcg_temp_free_i32(tmp);
11072 gen_lookup_tb(s);
11073 } else {
11074 if (insn & (1 << 4)) {
11075 shift = CPSR_A | CPSR_I | CPSR_F;
11076 } else {
11077 shift = 0;
11078 }
11079 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11080 }
d9e028c1
PM
11081 break;
11082 default:
11083 goto undef;
9ee6e8bb
PB
11084 }
11085 break;
11086
99c475ab
FB
11087 default:
11088 goto undef;
11089 }
11090 break;
11091
11092 case 12:
a7d3970d 11093 {
99c475ab 11094 /* load/store multiple */
39d5492a
PM
11095 TCGv_i32 loaded_var;
11096 TCGV_UNUSED_I32(loaded_var);
99c475ab 11097 rn = (insn >> 8) & 0x7;
b0109805 11098 addr = load_reg(s, rn);
99c475ab
FB
11099 for (i = 0; i < 8; i++) {
11100 if (insn & (1 << i)) {
99c475ab
FB
11101 if (insn & (1 << 11)) {
11102 /* load */
c40c8556 11103 tmp = tcg_temp_new_i32();
6ce2faf4 11104 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
a7d3970d
PM
11105 if (i == rn) {
11106 loaded_var = tmp;
11107 } else {
11108 store_reg(s, i, tmp);
11109 }
99c475ab
FB
11110 } else {
11111 /* store */
b0109805 11112 tmp = load_reg(s, i);
6ce2faf4 11113 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 11114 tcg_temp_free_i32(tmp);
99c475ab 11115 }
5899f386 11116 /* advance to the next address */
b0109805 11117 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11118 }
11119 }
b0109805 11120 if ((insn & (1 << rn)) == 0) {
a7d3970d 11121 /* base reg not in list: base register writeback */
b0109805
PB
11122 store_reg(s, rn, addr);
11123 } else {
a7d3970d
PM
11124 /* base reg in list: if load, complete it now */
11125 if (insn & (1 << 11)) {
11126 store_reg(s, rn, loaded_var);
11127 }
7d1b0095 11128 tcg_temp_free_i32(addr);
b0109805 11129 }
99c475ab 11130 break;
a7d3970d 11131 }
99c475ab
FB
11132 case 13:
11133 /* conditional branch or swi */
11134 cond = (insn >> 8) & 0xf;
11135 if (cond == 0xe)
11136 goto undef;
11137
11138 if (cond == 0xf) {
11139 /* swi */
eaed129d 11140 gen_set_pc_im(s, s->pc);
d4a2dc67 11141 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11142 s->is_jmp = DISAS_SWI;
99c475ab
FB
11143 break;
11144 }
11145 /* generate a conditional jump to next instruction */
e50e6a20 11146 s->condlabel = gen_new_label();
39fb730a 11147 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11148 s->condjmp = 1;
99c475ab
FB
11149
11150 /* jump to the offset */
5899f386 11151 val = (uint32_t)s->pc + 2;
99c475ab 11152 offset = ((int32_t)insn << 24) >> 24;
5899f386 11153 val += offset << 1;
8aaca4c0 11154 gen_jmp(s, val);
99c475ab
FB
11155 break;
11156
11157 case 14:
358bf29e 11158 if (insn & (1 << 11)) {
9ee6e8bb
PB
11159 if (disas_thumb2_insn(env, s, insn))
11160 goto undef32;
358bf29e
PB
11161 break;
11162 }
9ee6e8bb 11163 /* unconditional branch */
99c475ab
FB
11164 val = (uint32_t)s->pc;
11165 offset = ((int32_t)insn << 21) >> 21;
11166 val += (offset << 1) + 2;
8aaca4c0 11167 gen_jmp(s, val);
99c475ab
FB
11168 break;
11169
11170 case 15:
9ee6e8bb 11171 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11172 goto undef32;
9ee6e8bb 11173 break;
99c475ab
FB
11174 }
11175 return;
9ee6e8bb 11176undef32:
73710361
GB
11177 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11178 default_exception_el(s));
9ee6e8bb
PB
11179 return;
11180illegal_op:
99c475ab 11181undef:
73710361
GB
11182 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11183 default_exception_el(s));
99c475ab
FB
11184}
11185
541ebcd4
PM
11186static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11187{
11188 /* Return true if the insn at dc->pc might cross a page boundary.
11189 * (False positives are OK, false negatives are not.)
11190 */
11191 uint16_t insn;
11192
11193 if ((s->pc & 3) == 0) {
11194 /* At a 4-aligned address we can't be crossing a page */
11195 return false;
11196 }
11197
11198 /* This must be a Thumb insn */
11199 insn = arm_lduw_code(env, s->pc, s->bswap_code);
11200
11201 if ((insn >> 11) >= 0x1d) {
11202 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11203 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11204 * end up actually treating this as two 16-bit insns (see the
11205 * code at the start of disas_thumb2_insn()) but we don't bother
11206 * to check for that as it is unlikely, and false positives here
11207 * are harmless.
11208 */
11209 return true;
11210 }
11211 /* Definitely a 16-bit insn, can't be crossing a page. */
11212 return false;
11213}
11214
/* generate intermediate code for basic block 'tb'.
 *
 * Top-level A32/T32 translation loop: decode guest instructions starting
 * at tb->pc and emit TCG ops for them, stopping on a branch/exception,
 * at a page boundary, or when the per-TB instruction budget runs out.
 * On return, tb->size and tb->icount describe what was translated.
 * AArch64 state is handed off to gen_intermediate_code_a64().
 */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;   /* translator state lives on the stack */
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    /* Unpack the translation-relevant CPU state from the TB flags word */
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    /* Scratch TCG temporaries shared by the VFP/Neon/iwMMXt decoders */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block.  */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
        /* Record PC and IT-state so restore_state_to_opc() can recover them */
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        /* CPU (architectural) breakpoint: let the helper
                         * decide at run time whether it actually fires.
                         */
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        /* GDB breakpoint: always raise a debug exception */
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing. */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine after each Thumb insn */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        /* Close the "condition failed" skip label of a conditional insn
         * that fell through (did not end the TB itself).
         */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME:  This can theoretically happen with self-modifying
               code.  */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            /* Terminate the "condition failed" path of a trailing
             * conditional insn by continuing at the next PC.
             */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
11597
/* Names of the AArch32 CPU modes, indexed by the PSR mode field (M[3:0],
 * see the cpu_mode_names[psr & 0xf] lookup in arm_cpu_dump_state());
 * "???" marks encodings that are not valid modes.
 * Fully const-qualified: neither the strings nor the pointer table are
 * ever written.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 11602
878096ee
AF
11603void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11604 int flags)
2c0262af 11605{
878096ee
AF
11606 ARMCPU *cpu = ARM_CPU(cs);
11607 CPUARMState *env = &cpu->env;
2c0262af 11608 int i;
b5ff1b31 11609 uint32_t psr;
06e5cf7a 11610 const char *ns_status;
2c0262af 11611
17731115
PM
11612 if (is_a64(env)) {
11613 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11614 return;
11615 }
11616
2c0262af 11617 for(i=0;i<16;i++) {
7fe48483 11618 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11619 if ((i % 4) == 3)
7fe48483 11620 cpu_fprintf(f, "\n");
2c0262af 11621 else
7fe48483 11622 cpu_fprintf(f, " ");
2c0262af 11623 }
b5ff1b31 11624 psr = cpsr_read(env);
06e5cf7a
PM
11625
11626 if (arm_feature(env, ARM_FEATURE_EL3) &&
11627 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
11628 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
11629 } else {
11630 ns_status = "";
11631 }
11632
11633 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 11634 psr,
b5ff1b31
FB
11635 psr & (1 << 31) ? 'N' : '-',
11636 psr & (1 << 30) ? 'Z' : '-',
11637 psr & (1 << 29) ? 'C' : '-',
11638 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11639 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 11640 ns_status,
b5ff1b31 11641 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11642
f2617cfc
PM
11643 if (flags & CPU_DUMP_FPU) {
11644 int numvfpregs = 0;
11645 if (arm_feature(env, ARM_FEATURE_VFP)) {
11646 numvfpregs += 16;
11647 }
11648 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11649 numvfpregs += 16;
11650 }
11651 for (i = 0; i < numvfpregs; i++) {
11652 uint64_t v = float64_val(env->vfp.regs[i]);
11653 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11654 i * 2, (uint32_t)v,
11655 i * 2 + 1, (uint32_t)(v >> 32),
11656 i, v);
11657 }
11658 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11659 }
2c0262af 11660}
a6b025d3 11661
bad729e2
RH
11662void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
11663 target_ulong *data)
d2856f1a 11664{
3926cc84 11665 if (is_a64(env)) {
bad729e2 11666 env->pc = data[0];
40f860cd 11667 env->condexec_bits = 0;
3926cc84 11668 } else {
bad729e2
RH
11669 env->regs[15] = data[0];
11670 env->condexec_bits = data[1];
3926cc84 11671 }
d2856f1a 11672}