]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
exec: [tcg] Track which vCPU is performing translation and execution
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
1497c961 31
2ef6175a
RH
32#include "exec/helper-proto.h"
33#include "exec/helper-gen.h"
2c0262af 34
a7e30d84 35#include "trace-tcg.h"
508127e2 36#include "exec/log.h"
a7e30d84
LV
37
38
2b51668f
PM
39#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
40#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 41/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 42#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 43#define ENABLE_ARCH_5J 0
2b51668f
PM
44#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
45#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
46#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
47#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
48#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 49
86753403 50#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 51
f570c61e 52#include "translate.h"
e12ce78d 53
b5ff1b31
FB
54#if defined(CONFIG_USER_ONLY)
55#define IS_USER(s) 1
56#else
57#define IS_USER(s) (s->user)
58#endif
59
1bcea73e 60TCGv_env cpu_env;
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
78bcaa3e
RH
64TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65TCGv_i64 cpu_exclusive_addr;
66TCGv_i64 cpu_exclusive_val;
426f5abc 67#ifdef CONFIG_USER_ONLY
78bcaa3e
RH
68TCGv_i64 cpu_exclusive_test;
69TCGv_i32 cpu_exclusive_info;
426f5abc 70#endif
ad69471c 71
b26eefb6 72/* FIXME: These should be removed. */
39d5492a 73static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 74static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 75
022c62cb 76#include "exec/gen-icount.h"
2e70f6ef 77
155c3eac
FN
78static const char *regnames[] =
79 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
80 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
81
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    /* The CPU state pointer lives in a fixed host register (TCG_AREG0). */
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    /* One TCG global per core register r0..r14 plus pc (see regnames[]). */
    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    /* Condition flags are kept in separate globals rather than in CPSR. */
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    /* State for load-exclusive/store-exclusive emulation. */
    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

    /* Also initialize the AArch64 translator's globals. */
    a64_translate_init();
}
113
579d21cc
PM
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:
    default:
        /* Stage-2 (or any other) index should never reach this path. */
        g_assert_not_reached();
    }
}
135
/* Load a 32-bit field at the given byte offset in CPUARMState into a
 * freshly allocated temporary; caller owns (and must free) the temp.
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Store var into the CPUARMState field at the given byte offset.
 * var must be a temporary and is freed (marked dead) here.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 153
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* Writes to PC force bit 0 clear and end the current TB,
         * since the execution address has changed.
         */
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
189
b26eefb6 190/* Value extensions. */
86831435
PB
191#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
192#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
193#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
194#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
195
1497c961
PB
196#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
197#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 198
b26eefb6 199
/* Write var into CPSR under the control of a constant bit mask
 * (only masked bits are updated); var itself is not freed here.
 */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
d9ba4830
PB
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

/* Raise a QEMU-internal exception (asserted to be internal, i.e. not
 * a guest-architectural exception).
 */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

/* Raise a guest exception with the given syndrome value, taken to
 * the specified target exception level.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
231
50225ad0
PM
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    /* Translation stops here: the exception ends the block. */
    s->is_jmp = DISAS_EXC;
}
259
/* Dual 16x16->32 signed multiply, in place:
 *   a = sxt16(a[15:0])  * sxt16(b[15:0])   (low halfwords)
 *   b = (a >> 16)       * (b >> 16)        (high halfwords, arithmetic shift)
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* High-half product must be computed before a is overwritten below. */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
274
/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Swap the two bytes within each 16-bit lane using shift-and-mask. */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
294
/* Unsigned bitfield extract. */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract. */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend the width-bit field via the xor/sub trick:
         * flip the sign bit, then subtract it back out.
         */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
317
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    /* Result is accumulated in place in a, which is returned to caller. */
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
345
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* Unsigned widening multiply into separate low/high halves. */
    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Signed 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
382
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Rotate by 16 expressed as two shifts plus OR. */
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
392
b26eefb6
PB
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Mask off each lane's top bit so the add cannot carry between
     * halfwords, then patch the top bits back in via the saved xor.
     */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
412
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv_i32 var)
{
    /* NF holds the value whose sign bit is N; ZF is zero iff Z is set. */
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
447
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 produces the 33-bit sum: low word into NF, carry into CF. */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands agree in
     * sign but the result's sign differs.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first fold in the incoming carry,
         * then add t1, accumulating the carry-out in CF.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the 33-bit sum in a 64-bit temp and split it. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
489
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction sets C when there is NO borrow, i.e. t0 >= t1. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* V = (result ^ t0) & (t0 ^ t1): overflow iff operands differ in
     * sign and the result's sign differs from t0's.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    /* Subtract-with-carry is add-with-carry of the complement. */
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
513
/* Variable shift by register: only the low byte of t1 is significant,
 * and shifts of 32 or more yield 0 (handled via movcond).
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/* Arithmetic shift right by register: shift counts >= 32 are clamped
 * to 31, which replicates the sign bit as the architecture requires.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
544
/* dest = |src| (two's-complement absolute value), via movcond. */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 554
/* Set CF to bit 'shift' of var (the shifter carry-out). */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            /* For shift == 31 the shift alone already isolates the bit. */
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 566
9a119ff6 567/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
568static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
569 int shift, int flags)
9a119ff6
PB
570{
571 switch (shiftop) {
572 case 0: /* LSL */
573 if (shift != 0) {
574 if (flags)
575 shifter_out_im(var, 32 - shift);
576 tcg_gen_shli_i32(var, var, shift);
577 }
578 break;
579 case 1: /* LSR */
580 if (shift == 0) {
581 if (flags) {
66c374de 582 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
583 }
584 tcg_gen_movi_i32(var, 0);
585 } else {
586 if (flags)
587 shifter_out_im(var, shift - 1);
588 tcg_gen_shri_i32(var, var, shift);
589 }
590 break;
591 case 2: /* ASR */
592 if (shift == 0)
593 shift = 32;
594 if (flags)
595 shifter_out_im(var, shift - 1);
596 if (shift == 32)
597 shift = 31;
598 tcg_gen_sari_i32(var, var, shift);
599 break;
600 case 3: /* ROR/RRX */
601 if (shift != 0) {
602 if (flags)
603 shifter_out_im(var, shift - 1);
f669df27 604 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 605 } else {
39d5492a 606 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 607 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
608 if (flags)
609 shifter_out_im(var, 0);
610 tcg_gen_shri_i32(var, var, 1);
b26eefb6 611 tcg_gen_or_i32(var, var, tmp);
7d1b0095 612 tcg_temp_free_i32(tmp);
b26eefb6
PB
613 }
614 }
615};
616
39d5492a
PM
/* Shift by register.  If flags is non-zero the flag-setting helpers
 * (which also update CF) are used; otherwise plain shifts are emitted.
 * The shift temporary is freed (marked dead) here.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
644
6ddbc6e4
PB
/* Dispatch an ARM-encoding parallel add/subtract (op2 selects the
 * operation, op1 selects the signed/unsigned/saturating/halving family).
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
    /* The s/u helpers additionally take a pointer to the GE flags. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
690
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
/* Thumb-2 variant: op1 selects the operation, op2 selects the family
 * (the reverse of the ARM encoding handled above).
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
    /* The s/u helpers additionally take a pointer to the GE flags. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
737
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;    /* false when value is a fresh temp we own */

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

/* Free the comparison value if arm_test_cc allocated a temp for it. */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

/* Branch to label when the prepared condition holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

/* Convenience wrapper: test cc and branch to label in one step. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 847
/* Indexed by data-processing opcode: 1 for the logical ops (which, per
 * the comments below, set NZ from the result), 0 for the arithmetic ops.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 866
d9ba4830
PB
/* Set PC and Thumb state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    /* Only write the thumb flag if it actually changes (bit 0 of addr). */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    /* Bit 0 selects the new Thumb state; the PC gets the rest. */
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
890
21aeb343
JR
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
915
e334bd31
PB
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

/* BE32_XOR is the address-bit flip used to emulate system-mode BE32
 * byte/halfword accesses (SCTLR.B set); zero for word-sized accesses.
 */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_ld_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_ld_i32(val, addr, index, opc);                          \
}

#define DO_GEN_ST(SUFF, OPC, BE32_XOR)                                   \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 addr, int index)           \
{                                                                        \
    TCGMemOp opc = (OPC) | s->be_data;                                   \
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */    \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) {                       \
        TCGv addr_be = tcg_temp_new();                                   \
        tcg_gen_xori_i32(addr_be, addr, BE32_XOR);                       \
        tcg_gen_qemu_st_i32(val, addr_be, index, opc);                   \
        tcg_temp_free(addr_be);                                          \
        return;                                                          \
    }                                                                    \
    tcg_gen_qemu_st_i32(val, addr, index, opc);                          \
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        /* BE32 system mode: swap the two words of a 64-bit value. */
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;
    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
        return;
    }
    tcg_gen_qemu_st_i64(val, addr, index, opc);
}
988
989#else
990
/* TARGET_LONG_BITS == 64 variants: same semantics as the 32-bit-target
 * macros above, but the 32-bit guest address must first be zero-extended
 * into a 64-bit TCGv before the memory operation (and the BE32 XOR is
 * then applied to the 64-bit address).
 */
#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 addr, int index) \
{ \
    TCGMemOp opc = (OPC) | s->be_data; \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
    } \
    tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
    tcg_temp_free(addr64); \
}

#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 addr, int index) \
{ \
    TCGMemOp opc = (OPC) | s->be_data; \
    TCGv addr64 = tcg_temp_new(); \
    tcg_gen_extu_i32_i64(addr64, addr); \
    /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
    if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
        tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
    } \
    tcg_gen_qemu_st_i32(val, addr64, index, opc); \
    tcg_temp_free(addr64); \
}
1020
12dcc321
PB
/* 64-bit-target version of gen_aa32_ld64: zero-extend the address,
 * perform the load, then apply the BE32 half-swap if required.
 */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;

    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld_i64(val, addr64, index, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
    tcg_temp_free(addr64);
}

/* 64-bit-target version of gen_aa32_st64; the BE32 swap is done into a
 * temporary so the caller's value is preserved.
 */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 addr, int index)
{
    TCGMemOp opc = MO_Q | s->be_data;

    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
        tcg_temp_free(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr64, index, opc);
    }
    tcg_temp_free(addr64);
}
1054
1055#endif
1056
e334bd31
PB
/* Instantiate the AArch32 load/store helpers for each access size and
 * signedness.  The last argument is the BE32 address XOR (see
 * DO_GEN_LD above): 3 for bytes, 2 for halfwords, 0 for words.
 */
DO_GEN_LD(8s, MO_SB, 3)
DO_GEN_LD(8u, MO_UB, 3)
DO_GEN_LD(16s, MO_SW, 2)
DO_GEN_LD(16u, MO_UW, 2)
DO_GEN_LD(32u, MO_UL, 0)
/* 'a' variants include an alignment check */
DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
DO_GEN_ST(8, MO_UB, 3)
DO_GEN_ST(16, MO_UW, 2)
DO_GEN_ST(32, MO_UL, 0)
08307563 1068
/* Load an immediate PC value into the guest r15. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
1073
37e6456e
PM
/* Generate code for the HVC (hypervisor call) instruction with the
 * given 16-bit immediate.
 */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

/* Generate code for the SMC (secure monitor call) instruction. */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1106
d4a2dc67
PM
/* If we are in the middle of a Thumb IT block, write the updated
 * condexec bits back into the CPU state, so they are correct if an
 * exception is taken on this instruction.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

/* Raise a QEMU-internal exception 'excp'.  'offset' is subtracted from
 * s->pc to recover the address of the instruction being translated.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_JUMP;
}

/* Raise an architectural exception 'excp' with syndrome value 'syn',
 * targeting exception level 'target_el'.  'offset' as above.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_JUMP;
}
1134
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 is masked out of the stored PC; NOTE(review): presumably to
     * strip a Thumb interworking bit -- confirm against callers. */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_JUMP;
}
1141
/* Apply the addressing-mode offset encoded in an ARM single data
 * transfer instruction to 'var' (the base address), in place.
 * Bit 25 selects immediate vs. shifted-register offset; bit 23
 * selects add vs. subtract.
 */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1169
/* As gen_add_data_offset, but for the halfword/misc transfer encodings:
 * bit 22 selects a split 8-bit immediate vs. a plain register offset.
 * 'extra' is an additional fixed displacement folded into the address
 * (NOTE(review): looks like it is used for the second word of a
 * doubleword access -- confirm at callers).
 */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
1197
5aaebd13
PM
1198static TCGv_ptr get_fpstatus_ptr(int neon)
1199{
1200 TCGv_ptr statusptr = tcg_temp_new_ptr();
1201 int offset;
1202 if (neon) {
0ecb72a5 1203 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1204 } else {
0ecb72a5 1205 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1206 }
1207 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1208 return statusptr;
1209}
1210
4373f3ce
PB
/* Define gen_vfp_<name>(dp): emit F0 := F0 <op> F1 via the matching
 * VFP helper, in double (dp != 0) or single precision, using the
 * normal VFP float_status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1251
4373f3ce
PB
1252static inline void gen_vfp_abs(int dp)
1253{
1254 if (dp)
1255 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1256 else
1257 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1258}
1259
1260static inline void gen_vfp_neg(int dp)
1261{
1262 if (dp)
1263 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1264 else
1265 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1266}
1267
1268static inline void gen_vfp_sqrt(int dp)
1269{
1270 if (dp)
1271 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1272 else
1273 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1274}
1275
1276static inline void gen_vfp_cmp(int dp)
1277{
1278 if (dp)
1279 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1280 else
1281 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1282}
1283
1284static inline void gen_vfp_cmpe(int dp)
1285{
1286 if (dp)
1287 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1288 else
1289 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1290}
1291
1292static inline void gen_vfp_F1_ld0(int dp)
1293{
1294 if (dp)
5b340b51 1295 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1296 else
5b340b51 1297 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1298}
1299
5500b06c
PM
/* Define gen_vfp_<name>(dp, neon): integer-to-float conversion of F0,
 * using the Neon "standard FPSCR" float_status when neon is nonzero.
 * The integer source is always the single-precision view (cpu_F0s).
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* Float-to-integer conversions; the integer result is always written
 * to the single-precision view (cpu_F0s).
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* Fixed-point conversions taking a shift (number of fractional bits);
 * 'round' selects the rounding-mode suffix of the helper name.
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1358
/* Load F0 (double or single precision) from the guest address 'addr'. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

/* Store F0 (double or single precision) to the guest address 'addr'. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1376
8e96005d
FB
1377static inline long
1378vfp_reg_offset (int dp, int reg)
1379{
1380 if (dp)
1381 return offsetof(CPUARMState, vfp.regs[reg]);
1382 else if (reg & 1) {
1383 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1384 + offsetof(CPU_DoubleU, l.upper);
1385 } else {
1386 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1387 + offsetof(CPU_DoubleU, l.lower);
1388 }
1389}
9ee6e8bb
PB
1390
1391/* Return the offset of a 32-bit piece of a NEON register.
1392 zero is the least significant end of the register. */
1393static inline long
1394neon_reg_offset (int reg, int n)
1395{
1396 int sreg;
1397 sreg = reg * 2 + n;
1398 return vfp_reg_offset(0, sreg);
1399}
1400
/* Load one 32-bit pass of a NEON register into a fresh temporary.
 * The caller is responsible for freeing the returned temp.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

/* Store 'var' into one 32-bit pass of a NEON register.  Consumes
 * (frees) 'var'.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* Load/store a whole 64-bit NEON (D) register. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Float loads/stores are bit-identical to the integer ops of the same
 * width; alias them for readability at VFP call sites.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1428
b7bcbe95
FB
/* Copy VFP register 'reg' into the F0 working register. */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy VFP register 'reg' into the F1 working register. */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Copy F0 back into VFP register 'reg'. */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

/* Coprocessor instruction load/store direction bit (L bit). */
#define ARM_CP_RW_BIT (1 << 20)
1454
/* Load/store a 64-bit iwMMXt data register (wRn). */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load an iwMMXt control register into a fresh temp (caller frees). */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store 'var' into an iwMMXt control register.  Consumes (frees) 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Moves between the M0 working register and wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit bitwise accumulate operations: M0 op= wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1505
/* Define gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, for helpers that also take cpu_env (presumably because
 * they touch CPU state such as flags/saturation -- see the helper
 * definitions).
 */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env helper. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand env helper: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1530
/* Instantiate the iwMMXt two-operand generators declared above. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1585
e677137d
PB
/* Set bit 1 ("MUP") of the wCon control register. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set bit 0 ("CUP") of the wCon control register. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Compute the flag bits from M0 via the helper and store them in the
 * wCASF (arithmetic status flags) control register.
 */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1615
39d5492a
PM
/* Decode the addressing mode of an iwMMXt load/store: leave the
 * effective address in 'dest' and perform any base-register writeback.
 * Returns 1 for an invalid encoding (caller UNDEFs), else 0.
 *
 * NOTE(review): in the fall-through case (neither pre- nor
 * post-indexed, bit 23 set) this returns 0 without ever writing
 * 'dest' and without freeing 'tmp' -- looks suspicious; confirm
 * whether that encoding is reachable from the callers.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 of the insn is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1650
/* Fetch the shift amount for an iwMMXt shift instruction into 'dest',
 * ANDed with 'mask'.  The source is either a wCGRn control register
 * (insn bit 8 set; only wCGR0..wCGR3 are legal) or the low 32 bits of
 * a main iwMMXt register.  Returns 1 on an invalid encoding, else 0.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        /* Keep only the low 32 bits of the 64-bit register. */
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1672
a1c7273b 1673/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1674 (ie. an undefined instruction). */
7dcc1f89 1675static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1676{
1677 int rd, wrd;
1678 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1679 TCGv_i32 addr;
1680 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1681
1682 if ((insn & 0x0e000e00) == 0x0c000000) {
1683 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1684 wrd = insn & 0xf;
1685 rdlo = (insn >> 12) & 0xf;
1686 rdhi = (insn >> 16) & 0xf;
1687 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1688 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1689 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1690 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1691 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1692 } else { /* TMCRR */
da6b5335
FN
1693 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1694 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1695 gen_op_iwmmxt_set_mup();
1696 }
1697 return 0;
1698 }
1699
1700 wrd = (insn >> 12) & 0xf;
7d1b0095 1701 addr = tcg_temp_new_i32();
da6b5335 1702 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1703 tcg_temp_free_i32(addr);
18c9b560 1704 return 1;
da6b5335 1705 }
18c9b560
AZ
1706 if (insn & ARM_CP_RW_BIT) {
1707 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1708 tmp = tcg_temp_new_i32();
12dcc321 1709 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1710 iwmmxt_store_creg(wrd, tmp);
18c9b560 1711 } else {
e677137d
PB
1712 i = 1;
1713 if (insn & (1 << 8)) {
1714 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1715 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1716 i = 0;
1717 } else { /* WLDRW wRd */
29531141 1718 tmp = tcg_temp_new_i32();
12dcc321 1719 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1720 }
1721 } else {
29531141 1722 tmp = tcg_temp_new_i32();
e677137d 1723 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1724 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1725 } else { /* WLDRB */
12dcc321 1726 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1727 }
1728 }
1729 if (i) {
1730 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1731 tcg_temp_free_i32(tmp);
e677137d 1732 }
18c9b560
AZ
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 }
1735 } else {
1736 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1737 tmp = iwmmxt_load_creg(wrd);
12dcc321 1738 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1739 } else {
1740 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1741 tmp = tcg_temp_new_i32();
e677137d
PB
1742 if (insn & (1 << 8)) {
1743 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1744 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1745 } else { /* WSTRW wRd */
ecc7b3aa 1746 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1747 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1748 }
1749 } else {
1750 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1751 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1752 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1753 } else { /* WSTRB */
ecc7b3aa 1754 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1755 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1756 }
1757 }
18c9b560 1758 }
29531141 1759 tcg_temp_free_i32(tmp);
18c9b560 1760 }
7d1b0095 1761 tcg_temp_free_i32(addr);
18c9b560
AZ
1762 return 0;
1763 }
1764
1765 if ((insn & 0x0f000000) != 0x0e000000)
1766 return 1;
1767
1768 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1769 case 0x000: /* WOR */
1770 wrd = (insn >> 12) & 0xf;
1771 rd0 = (insn >> 0) & 0xf;
1772 rd1 = (insn >> 16) & 0xf;
1773 gen_op_iwmmxt_movq_M0_wRn(rd0);
1774 gen_op_iwmmxt_orq_M0_wRn(rd1);
1775 gen_op_iwmmxt_setpsr_nz();
1776 gen_op_iwmmxt_movq_wRn_M0(wrd);
1777 gen_op_iwmmxt_set_mup();
1778 gen_op_iwmmxt_set_cup();
1779 break;
1780 case 0x011: /* TMCR */
1781 if (insn & 0xf)
1782 return 1;
1783 rd = (insn >> 12) & 0xf;
1784 wrd = (insn >> 16) & 0xf;
1785 switch (wrd) {
1786 case ARM_IWMMXT_wCID:
1787 case ARM_IWMMXT_wCASF:
1788 break;
1789 case ARM_IWMMXT_wCon:
1790 gen_op_iwmmxt_set_cup();
1791 /* Fall through. */
1792 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1793 tmp = iwmmxt_load_creg(wrd);
1794 tmp2 = load_reg(s, rd);
f669df27 1795 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1796 tcg_temp_free_i32(tmp2);
da6b5335 1797 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1798 break;
1799 case ARM_IWMMXT_wCGR0:
1800 case ARM_IWMMXT_wCGR1:
1801 case ARM_IWMMXT_wCGR2:
1802 case ARM_IWMMXT_wCGR3:
1803 gen_op_iwmmxt_set_cup();
da6b5335
FN
1804 tmp = load_reg(s, rd);
1805 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1806 break;
1807 default:
1808 return 1;
1809 }
1810 break;
1811 case 0x100: /* WXOR */
1812 wrd = (insn >> 12) & 0xf;
1813 rd0 = (insn >> 0) & 0xf;
1814 rd1 = (insn >> 16) & 0xf;
1815 gen_op_iwmmxt_movq_M0_wRn(rd0);
1816 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1817 gen_op_iwmmxt_setpsr_nz();
1818 gen_op_iwmmxt_movq_wRn_M0(wrd);
1819 gen_op_iwmmxt_set_mup();
1820 gen_op_iwmmxt_set_cup();
1821 break;
1822 case 0x111: /* TMRC */
1823 if (insn & 0xf)
1824 return 1;
1825 rd = (insn >> 12) & 0xf;
1826 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1827 tmp = iwmmxt_load_creg(wrd);
1828 store_reg(s, rd, tmp);
18c9b560
AZ
1829 break;
1830 case 0x300: /* WANDN */
1831 wrd = (insn >> 12) & 0xf;
1832 rd0 = (insn >> 0) & 0xf;
1833 rd1 = (insn >> 16) & 0xf;
1834 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1835 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1836 gen_op_iwmmxt_andq_M0_wRn(rd1);
1837 gen_op_iwmmxt_setpsr_nz();
1838 gen_op_iwmmxt_movq_wRn_M0(wrd);
1839 gen_op_iwmmxt_set_mup();
1840 gen_op_iwmmxt_set_cup();
1841 break;
1842 case 0x200: /* WAND */
1843 wrd = (insn >> 12) & 0xf;
1844 rd0 = (insn >> 0) & 0xf;
1845 rd1 = (insn >> 16) & 0xf;
1846 gen_op_iwmmxt_movq_M0_wRn(rd0);
1847 gen_op_iwmmxt_andq_M0_wRn(rd1);
1848 gen_op_iwmmxt_setpsr_nz();
1849 gen_op_iwmmxt_movq_wRn_M0(wrd);
1850 gen_op_iwmmxt_set_mup();
1851 gen_op_iwmmxt_set_cup();
1852 break;
1853 case 0x810: case 0xa10: /* WMADD */
1854 wrd = (insn >> 12) & 0xf;
1855 rd0 = (insn >> 0) & 0xf;
1856 rd1 = (insn >> 16) & 0xf;
1857 gen_op_iwmmxt_movq_M0_wRn(rd0);
1858 if (insn & (1 << 21))
1859 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1860 else
1861 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1862 gen_op_iwmmxt_movq_wRn_M0(wrd);
1863 gen_op_iwmmxt_set_mup();
1864 break;
1865 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1866 wrd = (insn >> 12) & 0xf;
1867 rd0 = (insn >> 16) & 0xf;
1868 rd1 = (insn >> 0) & 0xf;
1869 gen_op_iwmmxt_movq_M0_wRn(rd0);
1870 switch ((insn >> 22) & 3) {
1871 case 0:
1872 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1873 break;
1874 case 1:
1875 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1876 break;
1877 case 2:
1878 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1879 break;
1880 case 3:
1881 return 1;
1882 }
1883 gen_op_iwmmxt_movq_wRn_M0(wrd);
1884 gen_op_iwmmxt_set_mup();
1885 gen_op_iwmmxt_set_cup();
1886 break;
1887 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1888 wrd = (insn >> 12) & 0xf;
1889 rd0 = (insn >> 16) & 0xf;
1890 rd1 = (insn >> 0) & 0xf;
1891 gen_op_iwmmxt_movq_M0_wRn(rd0);
1892 switch ((insn >> 22) & 3) {
1893 case 0:
1894 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1895 break;
1896 case 1:
1897 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1898 break;
1899 case 2:
1900 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1901 break;
1902 case 3:
1903 return 1;
1904 }
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 gen_op_iwmmxt_set_cup();
1908 break;
1909 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1910 wrd = (insn >> 12) & 0xf;
1911 rd0 = (insn >> 16) & 0xf;
1912 rd1 = (insn >> 0) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 if (insn & (1 << 22))
1915 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1916 else
1917 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1918 if (!(insn & (1 << 20)))
1919 gen_op_iwmmxt_addl_M0_wRn(wrd);
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 break;
1923 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1924 wrd = (insn >> 12) & 0xf;
1925 rd0 = (insn >> 16) & 0xf;
1926 rd1 = (insn >> 0) & 0xf;
1927 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1928 if (insn & (1 << 21)) {
1929 if (insn & (1 << 20))
1930 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1931 else
1932 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1933 } else {
1934 if (insn & (1 << 20))
1935 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1936 else
1937 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1938 }
18c9b560
AZ
1939 gen_op_iwmmxt_movq_wRn_M0(wrd);
1940 gen_op_iwmmxt_set_mup();
1941 break;
1942 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1943 wrd = (insn >> 12) & 0xf;
1944 rd0 = (insn >> 16) & 0xf;
1945 rd1 = (insn >> 0) & 0xf;
1946 gen_op_iwmmxt_movq_M0_wRn(rd0);
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1949 else
1950 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1951 if (!(insn & (1 << 20))) {
e677137d
PB
1952 iwmmxt_load_reg(cpu_V1, wrd);
1953 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1954 }
1955 gen_op_iwmmxt_movq_wRn_M0(wrd);
1956 gen_op_iwmmxt_set_mup();
1957 break;
1958 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1959 wrd = (insn >> 12) & 0xf;
1960 rd0 = (insn >> 16) & 0xf;
1961 rd1 = (insn >> 0) & 0xf;
1962 gen_op_iwmmxt_movq_M0_wRn(rd0);
1963 switch ((insn >> 22) & 3) {
1964 case 0:
1965 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1966 break;
1967 case 1:
1968 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1969 break;
1970 case 2:
1971 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1972 break;
1973 case 3:
1974 return 1;
1975 }
1976 gen_op_iwmmxt_movq_wRn_M0(wrd);
1977 gen_op_iwmmxt_set_mup();
1978 gen_op_iwmmxt_set_cup();
1979 break;
1980 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1981 wrd = (insn >> 12) & 0xf;
1982 rd0 = (insn >> 16) & 0xf;
1983 rd1 = (insn >> 0) & 0xf;
1984 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1985 if (insn & (1 << 22)) {
1986 if (insn & (1 << 20))
1987 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1988 else
1989 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1990 } else {
1991 if (insn & (1 << 20))
1992 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1993 else
1994 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1995 }
18c9b560
AZ
1996 gen_op_iwmmxt_movq_wRn_M0(wrd);
1997 gen_op_iwmmxt_set_mup();
1998 gen_op_iwmmxt_set_cup();
1999 break;
2000 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2001 wrd = (insn >> 12) & 0xf;
2002 rd0 = (insn >> 16) & 0xf;
2003 rd1 = (insn >> 0) & 0xf;
2004 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2005 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2006 tcg_gen_andi_i32(tmp, tmp, 7);
2007 iwmmxt_load_reg(cpu_V1, rd1);
2008 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2009 tcg_temp_free_i32(tmp);
18c9b560
AZ
2010 gen_op_iwmmxt_movq_wRn_M0(wrd);
2011 gen_op_iwmmxt_set_mup();
2012 break;
2013 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2014 if (((insn >> 6) & 3) == 3)
2015 return 1;
18c9b560
AZ
2016 rd = (insn >> 12) & 0xf;
2017 wrd = (insn >> 16) & 0xf;
da6b5335 2018 tmp = load_reg(s, rd);
18c9b560
AZ
2019 gen_op_iwmmxt_movq_M0_wRn(wrd);
2020 switch ((insn >> 6) & 3) {
2021 case 0:
da6b5335
FN
2022 tmp2 = tcg_const_i32(0xff);
2023 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2024 break;
2025 case 1:
da6b5335
FN
2026 tmp2 = tcg_const_i32(0xffff);
2027 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2028 break;
2029 case 2:
da6b5335
FN
2030 tmp2 = tcg_const_i32(0xffffffff);
2031 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2032 break;
da6b5335 2033 default:
39d5492a
PM
2034 TCGV_UNUSED_I32(tmp2);
2035 TCGV_UNUSED_I32(tmp3);
18c9b560 2036 }
da6b5335 2037 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2038 tcg_temp_free_i32(tmp3);
2039 tcg_temp_free_i32(tmp2);
7d1b0095 2040 tcg_temp_free_i32(tmp);
18c9b560
AZ
2041 gen_op_iwmmxt_movq_wRn_M0(wrd);
2042 gen_op_iwmmxt_set_mup();
2043 break;
2044 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2045 rd = (insn >> 12) & 0xf;
2046 wrd = (insn >> 16) & 0xf;
da6b5335 2047 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2048 return 1;
2049 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2050 tmp = tcg_temp_new_i32();
18c9b560
AZ
2051 switch ((insn >> 22) & 3) {
2052 case 0:
da6b5335 2053 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2054 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2055 if (insn & 8) {
2056 tcg_gen_ext8s_i32(tmp, tmp);
2057 } else {
2058 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2059 }
2060 break;
2061 case 1:
da6b5335 2062 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2063 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2064 if (insn & 8) {
2065 tcg_gen_ext16s_i32(tmp, tmp);
2066 } else {
2067 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2068 }
2069 break;
2070 case 2:
da6b5335 2071 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2072 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2073 break;
18c9b560 2074 }
da6b5335 2075 store_reg(s, rd, tmp);
18c9b560
AZ
2076 break;
2077 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2078 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2079 return 1;
da6b5335 2080 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2081 switch ((insn >> 22) & 3) {
2082 case 0:
da6b5335 2083 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2084 break;
2085 case 1:
da6b5335 2086 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2087 break;
2088 case 2:
da6b5335 2089 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2090 break;
18c9b560 2091 }
da6b5335
FN
2092 tcg_gen_shli_i32(tmp, tmp, 28);
2093 gen_set_nzcv(tmp);
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560
AZ
2095 break;
2096 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2097 if (((insn >> 6) & 3) == 3)
2098 return 1;
18c9b560
AZ
2099 rd = (insn >> 12) & 0xf;
2100 wrd = (insn >> 16) & 0xf;
da6b5335 2101 tmp = load_reg(s, rd);
18c9b560
AZ
2102 switch ((insn >> 6) & 3) {
2103 case 0:
da6b5335 2104 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2105 break;
2106 case 1:
da6b5335 2107 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2108 break;
2109 case 2:
da6b5335 2110 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2111 break;
18c9b560 2112 }
7d1b0095 2113 tcg_temp_free_i32(tmp);
18c9b560
AZ
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2118 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2119 return 1;
da6b5335 2120 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2121 tmp2 = tcg_temp_new_i32();
da6b5335 2122 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 for (i = 0; i < 7; i ++) {
da6b5335
FN
2126 tcg_gen_shli_i32(tmp2, tmp2, 4);
2127 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2128 }
2129 break;
2130 case 1:
2131 for (i = 0; i < 3; i ++) {
da6b5335
FN
2132 tcg_gen_shli_i32(tmp2, tmp2, 8);
2133 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2134 }
2135 break;
2136 case 2:
da6b5335
FN
2137 tcg_gen_shli_i32(tmp2, tmp2, 16);
2138 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2139 break;
18c9b560 2140 }
da6b5335 2141 gen_set_nzcv(tmp);
7d1b0095
PM
2142 tcg_temp_free_i32(tmp2);
2143 tcg_temp_free_i32(tmp);
18c9b560
AZ
2144 break;
2145 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2146 wrd = (insn >> 12) & 0xf;
2147 rd0 = (insn >> 16) & 0xf;
2148 gen_op_iwmmxt_movq_M0_wRn(rd0);
2149 switch ((insn >> 22) & 3) {
2150 case 0:
e677137d 2151 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2152 break;
2153 case 1:
e677137d 2154 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2155 break;
2156 case 2:
e677137d 2157 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2158 break;
2159 case 3:
2160 return 1;
2161 }
2162 gen_op_iwmmxt_movq_wRn_M0(wrd);
2163 gen_op_iwmmxt_set_mup();
2164 break;
2165 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2166 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2167 return 1;
da6b5335 2168 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2169 tmp2 = tcg_temp_new_i32();
da6b5335 2170 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2171 switch ((insn >> 22) & 3) {
2172 case 0:
2173 for (i = 0; i < 7; i ++) {
da6b5335
FN
2174 tcg_gen_shli_i32(tmp2, tmp2, 4);
2175 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2176 }
2177 break;
2178 case 1:
2179 for (i = 0; i < 3; i ++) {
da6b5335
FN
2180 tcg_gen_shli_i32(tmp2, tmp2, 8);
2181 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2182 }
2183 break;
2184 case 2:
da6b5335
FN
2185 tcg_gen_shli_i32(tmp2, tmp2, 16);
2186 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2187 break;
18c9b560 2188 }
da6b5335 2189 gen_set_nzcv(tmp);
7d1b0095
PM
2190 tcg_temp_free_i32(tmp2);
2191 tcg_temp_free_i32(tmp);
18c9b560
AZ
2192 break;
2193 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2194 rd = (insn >> 12) & 0xf;
2195 rd0 = (insn >> 16) & 0xf;
da6b5335 2196 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2197 return 1;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2199 tmp = tcg_temp_new_i32();
18c9b560
AZ
2200 switch ((insn >> 22) & 3) {
2201 case 0:
da6b5335 2202 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2203 break;
2204 case 1:
da6b5335 2205 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2206 break;
2207 case 2:
da6b5335 2208 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2209 break;
18c9b560 2210 }
da6b5335 2211 store_reg(s, rd, tmp);
18c9b560
AZ
2212 break;
2213 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2214 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2215 wrd = (insn >> 12) & 0xf;
2216 rd0 = (insn >> 16) & 0xf;
2217 rd1 = (insn >> 0) & 0xf;
2218 gen_op_iwmmxt_movq_M0_wRn(rd0);
2219 switch ((insn >> 22) & 3) {
2220 case 0:
2221 if (insn & (1 << 21))
2222 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2223 else
2224 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2225 break;
2226 case 1:
2227 if (insn & (1 << 21))
2228 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2229 else
2230 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2231 break;
2232 case 2:
2233 if (insn & (1 << 21))
2234 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2235 else
2236 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2237 break;
2238 case 3:
2239 return 1;
2240 }
2241 gen_op_iwmmxt_movq_wRn_M0(wrd);
2242 gen_op_iwmmxt_set_mup();
2243 gen_op_iwmmxt_set_cup();
2244 break;
2245 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2246 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2247 wrd = (insn >> 12) & 0xf;
2248 rd0 = (insn >> 16) & 0xf;
2249 gen_op_iwmmxt_movq_M0_wRn(rd0);
2250 switch ((insn >> 22) & 3) {
2251 case 0:
2252 if (insn & (1 << 21))
2253 gen_op_iwmmxt_unpacklsb_M0();
2254 else
2255 gen_op_iwmmxt_unpacklub_M0();
2256 break;
2257 case 1:
2258 if (insn & (1 << 21))
2259 gen_op_iwmmxt_unpacklsw_M0();
2260 else
2261 gen_op_iwmmxt_unpackluw_M0();
2262 break;
2263 case 2:
2264 if (insn & (1 << 21))
2265 gen_op_iwmmxt_unpacklsl_M0();
2266 else
2267 gen_op_iwmmxt_unpacklul_M0();
2268 break;
2269 case 3:
2270 return 1;
2271 }
2272 gen_op_iwmmxt_movq_wRn_M0(wrd);
2273 gen_op_iwmmxt_set_mup();
2274 gen_op_iwmmxt_set_cup();
2275 break;
2276 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2277 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2278 wrd = (insn >> 12) & 0xf;
2279 rd0 = (insn >> 16) & 0xf;
2280 gen_op_iwmmxt_movq_M0_wRn(rd0);
2281 switch ((insn >> 22) & 3) {
2282 case 0:
2283 if (insn & (1 << 21))
2284 gen_op_iwmmxt_unpackhsb_M0();
2285 else
2286 gen_op_iwmmxt_unpackhub_M0();
2287 break;
2288 case 1:
2289 if (insn & (1 << 21))
2290 gen_op_iwmmxt_unpackhsw_M0();
2291 else
2292 gen_op_iwmmxt_unpackhuw_M0();
2293 break;
2294 case 2:
2295 if (insn & (1 << 21))
2296 gen_op_iwmmxt_unpackhsl_M0();
2297 else
2298 gen_op_iwmmxt_unpackhul_M0();
2299 break;
2300 case 3:
2301 return 1;
2302 }
2303 gen_op_iwmmxt_movq_wRn_M0(wrd);
2304 gen_op_iwmmxt_set_mup();
2305 gen_op_iwmmxt_set_cup();
2306 break;
2307 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2308 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2309 if (((insn >> 22) & 3) == 0)
2310 return 1;
18c9b560
AZ
2311 wrd = (insn >> 12) & 0xf;
2312 rd0 = (insn >> 16) & 0xf;
2313 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2314 tmp = tcg_temp_new_i32();
da6b5335 2315 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2316 tcg_temp_free_i32(tmp);
18c9b560 2317 return 1;
da6b5335 2318 }
18c9b560 2319 switch ((insn >> 22) & 3) {
18c9b560 2320 case 1:
477955bd 2321 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2322 break;
2323 case 2:
477955bd 2324 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2325 break;
2326 case 3:
477955bd 2327 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2328 break;
2329 }
7d1b0095 2330 tcg_temp_free_i32(tmp);
18c9b560
AZ
2331 gen_op_iwmmxt_movq_wRn_M0(wrd);
2332 gen_op_iwmmxt_set_mup();
2333 gen_op_iwmmxt_set_cup();
2334 break;
2335 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2336 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2337 if (((insn >> 22) & 3) == 0)
2338 return 1;
18c9b560
AZ
2339 wrd = (insn >> 12) & 0xf;
2340 rd0 = (insn >> 16) & 0xf;
2341 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2342 tmp = tcg_temp_new_i32();
da6b5335 2343 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2344 tcg_temp_free_i32(tmp);
18c9b560 2345 return 1;
da6b5335 2346 }
18c9b560 2347 switch ((insn >> 22) & 3) {
18c9b560 2348 case 1:
477955bd 2349 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2350 break;
2351 case 2:
477955bd 2352 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2353 break;
2354 case 3:
477955bd 2355 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2356 break;
2357 }
7d1b0095 2358 tcg_temp_free_i32(tmp);
18c9b560
AZ
2359 gen_op_iwmmxt_movq_wRn_M0(wrd);
2360 gen_op_iwmmxt_set_mup();
2361 gen_op_iwmmxt_set_cup();
2362 break;
2363 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2364 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2365 if (((insn >> 22) & 3) == 0)
2366 return 1;
18c9b560
AZ
2367 wrd = (insn >> 12) & 0xf;
2368 rd0 = (insn >> 16) & 0xf;
2369 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2370 tmp = tcg_temp_new_i32();
da6b5335 2371 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2372 tcg_temp_free_i32(tmp);
18c9b560 2373 return 1;
da6b5335 2374 }
18c9b560 2375 switch ((insn >> 22) & 3) {
18c9b560 2376 case 1:
477955bd 2377 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2378 break;
2379 case 2:
477955bd 2380 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2381 break;
2382 case 3:
477955bd 2383 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2384 break;
2385 }
7d1b0095 2386 tcg_temp_free_i32(tmp);
18c9b560
AZ
2387 gen_op_iwmmxt_movq_wRn_M0(wrd);
2388 gen_op_iwmmxt_set_mup();
2389 gen_op_iwmmxt_set_cup();
2390 break;
2391 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2392 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2393 if (((insn >> 22) & 3) == 0)
2394 return 1;
18c9b560
AZ
2395 wrd = (insn >> 12) & 0xf;
2396 rd0 = (insn >> 16) & 0xf;
2397 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2398 tmp = tcg_temp_new_i32();
18c9b560 2399 switch ((insn >> 22) & 3) {
18c9b560 2400 case 1:
da6b5335 2401 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2402 tcg_temp_free_i32(tmp);
18c9b560 2403 return 1;
da6b5335 2404 }
477955bd 2405 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2406 break;
2407 case 2:
da6b5335 2408 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2409 tcg_temp_free_i32(tmp);
18c9b560 2410 return 1;
da6b5335 2411 }
477955bd 2412 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2413 break;
2414 case 3:
da6b5335 2415 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2416 tcg_temp_free_i32(tmp);
18c9b560 2417 return 1;
da6b5335 2418 }
477955bd 2419 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2420 break;
2421 }
7d1b0095 2422 tcg_temp_free_i32(tmp);
18c9b560
AZ
2423 gen_op_iwmmxt_movq_wRn_M0(wrd);
2424 gen_op_iwmmxt_set_mup();
2425 gen_op_iwmmxt_set_cup();
2426 break;
2427 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2428 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2429 wrd = (insn >> 12) & 0xf;
2430 rd0 = (insn >> 16) & 0xf;
2431 rd1 = (insn >> 0) & 0xf;
2432 gen_op_iwmmxt_movq_M0_wRn(rd0);
2433 switch ((insn >> 22) & 3) {
2434 case 0:
2435 if (insn & (1 << 21))
2436 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2437 else
2438 gen_op_iwmmxt_minub_M0_wRn(rd1);
2439 break;
2440 case 1:
2441 if (insn & (1 << 21))
2442 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2443 else
2444 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2445 break;
2446 case 2:
2447 if (insn & (1 << 21))
2448 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2449 else
2450 gen_op_iwmmxt_minul_M0_wRn(rd1);
2451 break;
2452 case 3:
2453 return 1;
2454 }
2455 gen_op_iwmmxt_movq_wRn_M0(wrd);
2456 gen_op_iwmmxt_set_mup();
2457 break;
2458 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2459 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2460 wrd = (insn >> 12) & 0xf;
2461 rd0 = (insn >> 16) & 0xf;
2462 rd1 = (insn >> 0) & 0xf;
2463 gen_op_iwmmxt_movq_M0_wRn(rd0);
2464 switch ((insn >> 22) & 3) {
2465 case 0:
2466 if (insn & (1 << 21))
2467 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2468 else
2469 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2470 break;
2471 case 1:
2472 if (insn & (1 << 21))
2473 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2474 else
2475 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2476 break;
2477 case 2:
2478 if (insn & (1 << 21))
2479 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2480 else
2481 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2482 break;
2483 case 3:
2484 return 1;
2485 }
2486 gen_op_iwmmxt_movq_wRn_M0(wrd);
2487 gen_op_iwmmxt_set_mup();
2488 break;
2489 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2490 case 0x402: case 0x502: case 0x602: case 0x702:
2491 wrd = (insn >> 12) & 0xf;
2492 rd0 = (insn >> 16) & 0xf;
2493 rd1 = (insn >> 0) & 0xf;
2494 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2495 tmp = tcg_const_i32((insn >> 20) & 3);
2496 iwmmxt_load_reg(cpu_V1, rd1);
2497 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2498 tcg_temp_free_i32(tmp);
18c9b560
AZ
2499 gen_op_iwmmxt_movq_wRn_M0(wrd);
2500 gen_op_iwmmxt_set_mup();
2501 break;
2502 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2503 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2504 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2505 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2506 wrd = (insn >> 12) & 0xf;
2507 rd0 = (insn >> 16) & 0xf;
2508 rd1 = (insn >> 0) & 0xf;
2509 gen_op_iwmmxt_movq_M0_wRn(rd0);
2510 switch ((insn >> 20) & 0xf) {
2511 case 0x0:
2512 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2513 break;
2514 case 0x1:
2515 gen_op_iwmmxt_subub_M0_wRn(rd1);
2516 break;
2517 case 0x3:
2518 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2519 break;
2520 case 0x4:
2521 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2522 break;
2523 case 0x5:
2524 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2525 break;
2526 case 0x7:
2527 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2528 break;
2529 case 0x8:
2530 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2531 break;
2532 case 0x9:
2533 gen_op_iwmmxt_subul_M0_wRn(rd1);
2534 break;
2535 case 0xb:
2536 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2537 break;
2538 default:
2539 return 1;
2540 }
2541 gen_op_iwmmxt_movq_wRn_M0(wrd);
2542 gen_op_iwmmxt_set_mup();
2543 gen_op_iwmmxt_set_cup();
2544 break;
2545 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2546 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2547 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2548 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2549 wrd = (insn >> 12) & 0xf;
2550 rd0 = (insn >> 16) & 0xf;
2551 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2552 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2553 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2554 tcg_temp_free_i32(tmp);
18c9b560
AZ
2555 gen_op_iwmmxt_movq_wRn_M0(wrd);
2556 gen_op_iwmmxt_set_mup();
2557 gen_op_iwmmxt_set_cup();
2558 break;
2559 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2560 case 0x418: case 0x518: case 0x618: case 0x718:
2561 case 0x818: case 0x918: case 0xa18: case 0xb18:
2562 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2563 wrd = (insn >> 12) & 0xf;
2564 rd0 = (insn >> 16) & 0xf;
2565 rd1 = (insn >> 0) & 0xf;
2566 gen_op_iwmmxt_movq_M0_wRn(rd0);
2567 switch ((insn >> 20) & 0xf) {
2568 case 0x0:
2569 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2570 break;
2571 case 0x1:
2572 gen_op_iwmmxt_addub_M0_wRn(rd1);
2573 break;
2574 case 0x3:
2575 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2576 break;
2577 case 0x4:
2578 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2579 break;
2580 case 0x5:
2581 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2582 break;
2583 case 0x7:
2584 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2585 break;
2586 case 0x8:
2587 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2588 break;
2589 case 0x9:
2590 gen_op_iwmmxt_addul_M0_wRn(rd1);
2591 break;
2592 case 0xb:
2593 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2594 break;
2595 default:
2596 return 1;
2597 }
2598 gen_op_iwmmxt_movq_wRn_M0(wrd);
2599 gen_op_iwmmxt_set_mup();
2600 gen_op_iwmmxt_set_cup();
2601 break;
2602 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2603 case 0x408: case 0x508: case 0x608: case 0x708:
2604 case 0x808: case 0x908: case 0xa08: case 0xb08:
2605 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2606 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2607 return 1;
18c9b560
AZ
2608 wrd = (insn >> 12) & 0xf;
2609 rd0 = (insn >> 16) & 0xf;
2610 rd1 = (insn >> 0) & 0xf;
2611 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2612 switch ((insn >> 22) & 3) {
18c9b560
AZ
2613 case 1:
2614 if (insn & (1 << 21))
2615 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2616 else
2617 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2618 break;
2619 case 2:
2620 if (insn & (1 << 21))
2621 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2622 else
2623 gen_op_iwmmxt_packul_M0_wRn(rd1);
2624 break;
2625 case 3:
2626 if (insn & (1 << 21))
2627 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2628 else
2629 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2630 break;
2631 }
2632 gen_op_iwmmxt_movq_wRn_M0(wrd);
2633 gen_op_iwmmxt_set_mup();
2634 gen_op_iwmmxt_set_cup();
2635 break;
2636 case 0x201: case 0x203: case 0x205: case 0x207:
2637 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2638 case 0x211: case 0x213: case 0x215: case 0x217:
2639 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2640 wrd = (insn >> 5) & 0xf;
2641 rd0 = (insn >> 12) & 0xf;
2642 rd1 = (insn >> 0) & 0xf;
2643 if (rd0 == 0xf || rd1 == 0xf)
2644 return 1;
2645 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2646 tmp = load_reg(s, rd0);
2647 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2648 switch ((insn >> 16) & 0xf) {
2649 case 0x0: /* TMIA */
da6b5335 2650 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2651 break;
2652 case 0x8: /* TMIAPH */
da6b5335 2653 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2654 break;
2655 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2656 if (insn & (1 << 16))
da6b5335 2657 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2658 if (insn & (1 << 17))
da6b5335
FN
2659 tcg_gen_shri_i32(tmp2, tmp2, 16);
2660 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2661 break;
2662 default:
7d1b0095
PM
2663 tcg_temp_free_i32(tmp2);
2664 tcg_temp_free_i32(tmp);
18c9b560
AZ
2665 return 1;
2666 }
7d1b0095
PM
2667 tcg_temp_free_i32(tmp2);
2668 tcg_temp_free_i32(tmp);
18c9b560
AZ
2669 gen_op_iwmmxt_movq_wRn_M0(wrd);
2670 gen_op_iwmmxt_set_mup();
2671 break;
2672 default:
2673 return 1;
2674 }
2675
2676 return 0;
2677}
2678
a1c7273b 2679/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2680 (ie. an undefined instruction). */
7dcc1f89 2681static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2682{
2683 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2684 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2685
2686 if ((insn & 0x0ff00f10) == 0x0e200010) {
2687 /* Multiply with Internal Accumulate Format */
2688 rd0 = (insn >> 12) & 0xf;
2689 rd1 = insn & 0xf;
2690 acc = (insn >> 5) & 7;
2691
2692 if (acc != 0)
2693 return 1;
2694
3a554c0f
FN
2695 tmp = load_reg(s, rd0);
2696 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2697 switch ((insn >> 16) & 0xf) {
2698 case 0x0: /* MIA */
3a554c0f 2699 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2700 break;
2701 case 0x8: /* MIAPH */
3a554c0f 2702 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2703 break;
2704 case 0xc: /* MIABB */
2705 case 0xd: /* MIABT */
2706 case 0xe: /* MIATB */
2707 case 0xf: /* MIATT */
18c9b560 2708 if (insn & (1 << 16))
3a554c0f 2709 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2710 if (insn & (1 << 17))
3a554c0f
FN
2711 tcg_gen_shri_i32(tmp2, tmp2, 16);
2712 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2713 break;
2714 default:
2715 return 1;
2716 }
7d1b0095
PM
2717 tcg_temp_free_i32(tmp2);
2718 tcg_temp_free_i32(tmp);
18c9b560
AZ
2719
2720 gen_op_iwmmxt_movq_wRn_M0(acc);
2721 return 0;
2722 }
2723
2724 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2725 /* Internal Accumulator Access Format */
2726 rdhi = (insn >> 16) & 0xf;
2727 rdlo = (insn >> 12) & 0xf;
2728 acc = insn & 7;
2729
2730 if (acc != 0)
2731 return 1;
2732
2733 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2734 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2735 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2736 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2737 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2738 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2739 } else { /* MAR */
3a554c0f
FN
2740 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2741 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2742 }
2743 return 0;
2744 }
2745
2746 return 1;
2747}
2748
9ee6e8bb
PB
/* Shift right by n; a negative n means shift left by -n instead, so the
   macros below can position fields that sit left of the target bit.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a VFP single-precision register number: the 4-bit field at
   "bigbit" forms the high bits, the single bit at "smallbit" the LSB.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a VFP double-precision register number into "reg".  With VFP3
   the bit at "smallbit" supplies the 5th register bit (32 D registers);
   without VFP3 that bit must be zero, otherwise the enclosing function
   returns 1 (UNDEF) -- note the hidden control flow.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (dest), N and M register operands.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2768
4373f3ce 2769/* Move between integer and VFP cores. */
39d5492a 2770static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2771{
39d5492a 2772 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2773 tcg_gen_mov_i32(tmp, cpu_F0s);
2774 return tmp;
2775}
2776
39d5492a 2777static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2778{
2779 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2780 tcg_temp_free_i32(tmp);
4373f3ce
PB
2781}
2782
39d5492a 2783static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2784{
39d5492a 2785 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2786 if (shift)
2787 tcg_gen_shri_i32(var, var, shift);
86831435 2788 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2789 tcg_gen_shli_i32(tmp, var, 8);
2790 tcg_gen_or_i32(var, var, tmp);
2791 tcg_gen_shli_i32(tmp, var, 16);
2792 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2793 tcg_temp_free_i32(tmp);
ad69471c
PB
2794}
2795
39d5492a 2796static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2797{
39d5492a 2798 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2799 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2800 tcg_gen_shli_i32(tmp, var, 16);
2801 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2802 tcg_temp_free_i32(tmp);
ad69471c
PB
2803}
2804
39d5492a 2805static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2806{
39d5492a 2807 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2808 tcg_gen_andi_i32(var, var, 0xffff0000);
2809 tcg_gen_shri_i32(tmp, var, 16);
2810 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2811 tcg_temp_free_i32(tmp);
ad69471c
PB
2812}
2813
39d5492a 2814static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2815{
2816 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2817 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2818 switch (size) {
2819 case 0:
12dcc321 2820 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2821 gen_neon_dup_u8(tmp, 0);
2822 break;
2823 case 1:
12dcc321 2824 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2825 gen_neon_dup_low16(tmp);
2826 break;
2827 case 2:
12dcc321 2828 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2829 break;
2830 default: /* Avoid compiler warnings. */
2831 abort();
2832 }
2833 return tmp;
2834}
2835
04731fb5
WN
/* Handle a VSEL instruction: copy operand rn or rm into rd depending on
 * the condition encoded in insn bits [21:20] (0=eq, 1=vs, 2=ge, 3=gt),
 * evaluated against the cached NZCV flag registers.  Handles both single
 * (dp == 0) and double (dp == 1) precision.  Always returns 0.
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* ZF is only compared for (in)equality with zero, so zero
         * extension suffices; NF and VF are tested by sign (LT/GE
         * below), so they must be sign-extended to 64 bits.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: first select on !Z, then overwrite with frm
             * again unless N == V, combining both conditions.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        /* Single precision: the 32-bit flag registers can be used
         * directly, no extension needed.
         */
        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2944
40cfacdd
WN
2945static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2946 uint32_t rm, uint32_t dp)
2947{
2948 uint32_t vmin = extract32(insn, 6, 1);
2949 TCGv_ptr fpst = get_fpstatus_ptr(0);
2950
2951 if (dp) {
2952 TCGv_i64 frn, frm, dest;
2953
2954 frn = tcg_temp_new_i64();
2955 frm = tcg_temp_new_i64();
2956 dest = tcg_temp_new_i64();
2957
2958 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2959 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2960 if (vmin) {
f71a2ae5 2961 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2962 } else {
f71a2ae5 2963 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2964 }
2965 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2966 tcg_temp_free_i64(frn);
2967 tcg_temp_free_i64(frm);
2968 tcg_temp_free_i64(dest);
2969 } else {
2970 TCGv_i32 frn, frm, dest;
2971
2972 frn = tcg_temp_new_i32();
2973 frm = tcg_temp_new_i32();
2974 dest = tcg_temp_new_i32();
2975
2976 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2977 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2978 if (vmin) {
f71a2ae5 2979 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2980 } else {
f71a2ae5 2981 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2982 }
2983 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2984 tcg_temp_free_i32(frn);
2985 tcg_temp_free_i32(frm);
2986 tcg_temp_free_i32(dest);
2987 }
2988
2989 tcg_temp_free_ptr(fpst);
2990 return 0;
2991}
2992
7655f39b
WN
2993static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2994 int rounding)
2995{
2996 TCGv_ptr fpst = get_fpstatus_ptr(0);
2997 TCGv_i32 tcg_rmode;
2998
2999 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3000 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3001
3002 if (dp) {
3003 TCGv_i64 tcg_op;
3004 TCGv_i64 tcg_res;
3005 tcg_op = tcg_temp_new_i64();
3006 tcg_res = tcg_temp_new_i64();
3007 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3008 gen_helper_rintd(tcg_res, tcg_op, fpst);
3009 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3010 tcg_temp_free_i64(tcg_op);
3011 tcg_temp_free_i64(tcg_res);
3012 } else {
3013 TCGv_i32 tcg_op;
3014 TCGv_i32 tcg_res;
3015 tcg_op = tcg_temp_new_i32();
3016 tcg_res = tcg_temp_new_i32();
3017 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3018 gen_helper_rints(tcg_res, tcg_op, fpst);
3019 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3020 tcg_temp_free_i32(tcg_op);
3021 tcg_temp_free_i32(tcg_res);
3022 }
3023
3024 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3025 tcg_temp_free_i32(tcg_rmode);
3026
3027 tcg_temp_free_ptr(fpst);
3028 return 0;
3029}
3030
c9975a83
WN
3031static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3032 int rounding)
3033{
3034 bool is_signed = extract32(insn, 7, 1);
3035 TCGv_ptr fpst = get_fpstatus_ptr(0);
3036 TCGv_i32 tcg_rmode, tcg_shift;
3037
3038 tcg_shift = tcg_const_i32(0);
3039
3040 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3041 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3042
3043 if (dp) {
3044 TCGv_i64 tcg_double, tcg_res;
3045 TCGv_i32 tcg_tmp;
3046 /* Rd is encoded as a single precision register even when the source
3047 * is double precision.
3048 */
3049 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3050 tcg_double = tcg_temp_new_i64();
3051 tcg_res = tcg_temp_new_i64();
3052 tcg_tmp = tcg_temp_new_i32();
3053 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3054 if (is_signed) {
3055 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3056 } else {
3057 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3058 }
ecc7b3aa 3059 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3060 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3061 tcg_temp_free_i32(tcg_tmp);
3062 tcg_temp_free_i64(tcg_res);
3063 tcg_temp_free_i64(tcg_double);
3064 } else {
3065 TCGv_i32 tcg_single, tcg_res;
3066 tcg_single = tcg_temp_new_i32();
3067 tcg_res = tcg_temp_new_i32();
3068 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3069 if (is_signed) {
3070 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3071 } else {
3072 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3073 }
3074 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3075 tcg_temp_free_i32(tcg_res);
3076 tcg_temp_free_i32(tcg_single);
3077 }
3078
3079 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3080 tcg_temp_free_i32(tcg_rmode);
3081
3082 tcg_temp_free_i32(tcg_shift);
3083
3084 tcg_temp_free_ptr(fpst);
3085
3086 return 0;
3087}
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the 2-bit RM field (callers pass extract32(insn, 16, 2)).
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
3099
7dcc1f89 3100static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3101{
3102 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3103
d614a513 3104 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3105 return 1;
3106 }
3107
3108 if (dp) {
3109 VFP_DREG_D(rd, insn);
3110 VFP_DREG_N(rn, insn);
3111 VFP_DREG_M(rm, insn);
3112 } else {
3113 rd = VFP_SREG_D(insn);
3114 rn = VFP_SREG_N(insn);
3115 rm = VFP_SREG_M(insn);
3116 }
3117
3118 if ((insn & 0x0f800e50) == 0x0e000a00) {
3119 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3120 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3121 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3122 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3123 /* VRINTA, VRINTN, VRINTP, VRINTM */
3124 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3125 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3126 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3127 /* VCVTA, VCVTN, VCVTP, VCVTM */
3128 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3129 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3130 }
3131 return 1;
3132}
3133
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
 * (ie. an undefined instruction); on success the TCG code implementing
 * the instruction has been emitted and 0 is returned.
 */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* Coprocessor number 11 (insn[11:8] == 0xb) selects double precision. */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                pass = (insn >> 21) & 1;
                /* Decode the element size and lane offset within the
                 * 32-bit pass: 8-bit, 16-bit or whole-word transfer.
                 */
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        /* Replicate the duplicated value into every
                         * 32-bit word of the destination register(s).
                         */
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* rd == 15: only the NZCV flag bits are
                                 * transferred (FMSTAT form).
                                 */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            /* FPSCR changes can affect translation state,
                             * so end this TB and look up the next one.
                             */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    /* Expand the VFP3 8-bit immediate into a full
                     * single/double precision constant.
                     */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge the result into the low half of the
                         * destination, preserving the top half.
                         */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge the result into the top half of the
                         * destination, preserving the low half.
                         */
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        /* Restore the previous rounding mode. */
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
4053
90aa39a1 4054static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4055{
90aa39a1
SF
4056#ifndef CONFIG_USER_ONLY
4057 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4058 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4059#else
4060 return true;
4061#endif
4062}
6e256c93 4063
90aa39a1
SF
4064static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4065{
4066 if (use_goto_tb(s, dest)) {
57fec1fe 4067 tcg_gen_goto_tb(n);
eaed129d 4068 gen_set_pc_im(s, dest);
90aa39a1 4069 tcg_gen_exit_tb((uintptr_t)s->tb + n);
6e256c93 4070 } else {
eaed129d 4071 gen_set_pc_im(s, dest);
57fec1fe 4072 tcg_gen_exit_tb(0);
6e256c93 4073 }
c53be334
FB
4074}
4075
8aaca4c0
FB
4076static inline void gen_jmp (DisasContext *s, uint32_t dest)
4077{
50225ad0 4078 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 4079 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4080 if (s->thumb)
d9ba4830
PB
4081 dest |= 1;
4082 gen_bx_im(s, dest);
8aaca4c0 4083 } else {
6e256c93 4084 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4085 s->is_jmp = DISAS_TB_JUMP;
4086 }
4087}
4088
39d5492a 4089static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4090{
ee097184 4091 if (x)
d9ba4830 4092 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4093 else
d9ba4830 4094 gen_sxth(t0);
ee097184 4095 if (y)
d9ba4830 4096 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4097 else
d9ba4830
PB
4098 gen_sxth(t1);
4099 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4100}
4101
4102/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4103static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4104{
b5ff1b31
FB
4105 uint32_t mask;
4106
4107 mask = 0;
4108 if (flags & (1 << 0))
4109 mask |= 0xff;
4110 if (flags & (1 << 1))
4111 mask |= 0xff00;
4112 if (flags & (1 << 2))
4113 mask |= 0xff0000;
4114 if (flags & (1 << 3))
4115 mask |= 0xff000000;
9ee6e8bb 4116
2ae23e75 4117 /* Mask out undefined bits. */
9ee6e8bb 4118 mask &= ~CPSR_RESERVED;
d614a513 4119 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4120 mask &= ~CPSR_T;
d614a513
PM
4121 }
4122 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4123 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4124 }
4125 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4126 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4127 }
4128 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4129 mask &= ~CPSR_IT;
d614a513 4130 }
4051e12c
PM
4131 /* Mask out execution state and reserved bits. */
4132 if (!spsr) {
4133 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4134 }
b5ff1b31
FB
4135 /* Mask out privileged bits. */
4136 if (IS_USER(s))
9ee6e8bb 4137 mask &= CPSR_USER;
b5ff1b31
FB
4138 return mask;
4139}
4140
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * 'mask' (from msr_mask()) selects which bits are written; 'spsr' is the
 * cpu_env field offset of the target SPSR, or 0 to write the CPSR.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep bits outside 'mask' unchanged.  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes may change mode etc, so go via the helper.  */
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR state may affect translation; end the TB.  */
    gen_lookup_tb(s);
    return 0;
}
4162
2fbac54b
FN
4163/* Returns nonzero if access to the PSR is not permitted. */
4164static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4165{
39d5492a 4166 TCGv_i32 tmp;
7d1b0095 4167 tmp = tcg_temp_new_i32();
2fbac54b
FN
4168 tcg_gen_movi_i32(tmp, val);
4169 return gen_set_psr(s, mask, spsr, tmp);
4170}
4171
8bfd0550
PM
4172static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4173 int *tgtmode, int *regno)
4174{
4175 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4176 * the target mode and register number, and identify the various
4177 * unpredictable cases.
4178 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4179 * + executed in user mode
4180 * + using R15 as the src/dest register
4181 * + accessing an unimplemented register
4182 * + accessing a register that's inaccessible at current PL/security state*
4183 * + accessing a register that you could access with a different insn
4184 * We choose to UNDEF in all these cases.
4185 * Since we don't know which of the various AArch32 modes we are in
4186 * we have to defer some checks to runtime.
4187 * Accesses to Monitor mode registers from Secure EL1 (which implies
4188 * that EL3 is AArch64) must trap to EL3.
4189 *
4190 * If the access checks fail this function will emit code to take
4191 * an exception and return false. Otherwise it will return true,
4192 * and set *tgtmode and *regno appropriately.
4193 */
4194 int exc_target = default_exception_el(s);
4195
4196 /* These instructions are present only in ARMv8, or in ARMv7 with the
4197 * Virtualization Extensions.
4198 */
4199 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4200 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4201 goto undef;
4202 }
4203
4204 if (IS_USER(s) || rn == 15) {
4205 goto undef;
4206 }
4207
4208 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4209 * of registers into (r, sysm).
4210 */
4211 if (r) {
4212 /* SPSRs for other modes */
4213 switch (sysm) {
4214 case 0xe: /* SPSR_fiq */
4215 *tgtmode = ARM_CPU_MODE_FIQ;
4216 break;
4217 case 0x10: /* SPSR_irq */
4218 *tgtmode = ARM_CPU_MODE_IRQ;
4219 break;
4220 case 0x12: /* SPSR_svc */
4221 *tgtmode = ARM_CPU_MODE_SVC;
4222 break;
4223 case 0x14: /* SPSR_abt */
4224 *tgtmode = ARM_CPU_MODE_ABT;
4225 break;
4226 case 0x16: /* SPSR_und */
4227 *tgtmode = ARM_CPU_MODE_UND;
4228 break;
4229 case 0x1c: /* SPSR_mon */
4230 *tgtmode = ARM_CPU_MODE_MON;
4231 break;
4232 case 0x1e: /* SPSR_hyp */
4233 *tgtmode = ARM_CPU_MODE_HYP;
4234 break;
4235 default: /* unallocated */
4236 goto undef;
4237 }
4238 /* We arbitrarily assign SPSR a register number of 16. */
4239 *regno = 16;
4240 } else {
4241 /* general purpose registers for other modes */
4242 switch (sysm) {
4243 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4244 *tgtmode = ARM_CPU_MODE_USR;
4245 *regno = sysm + 8;
4246 break;
4247 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4248 *tgtmode = ARM_CPU_MODE_FIQ;
4249 *regno = sysm;
4250 break;
4251 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4252 *tgtmode = ARM_CPU_MODE_IRQ;
4253 *regno = sysm & 1 ? 13 : 14;
4254 break;
4255 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4256 *tgtmode = ARM_CPU_MODE_SVC;
4257 *regno = sysm & 1 ? 13 : 14;
4258 break;
4259 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4260 *tgtmode = ARM_CPU_MODE_ABT;
4261 *regno = sysm & 1 ? 13 : 14;
4262 break;
4263 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4264 *tgtmode = ARM_CPU_MODE_UND;
4265 *regno = sysm & 1 ? 13 : 14;
4266 break;
4267 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4268 *tgtmode = ARM_CPU_MODE_MON;
4269 *regno = sysm & 1 ? 13 : 14;
4270 break;
4271 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4272 *tgtmode = ARM_CPU_MODE_HYP;
4273 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4274 *regno = sysm & 1 ? 13 : 17;
4275 break;
4276 default: /* unallocated */
4277 goto undef;
4278 }
4279 }
4280
4281 /* Catch the 'accessing inaccessible register' cases we can detect
4282 * at translate time.
4283 */
4284 switch (*tgtmode) {
4285 case ARM_CPU_MODE_MON:
4286 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4287 goto undef;
4288 }
4289 if (s->current_el == 1) {
4290 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4291 * then accesses to Mon registers trap to EL3
4292 */
4293 exc_target = 3;
4294 goto undef;
4295 }
4296 break;
4297 case ARM_CPU_MODE_HYP:
4298 /* Note that we can forbid accesses from EL2 here because they
4299 * must be from Hyp mode itself
4300 */
4301 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4302 goto undef;
4303 }
4304 break;
4305 default:
4306 break;
4307 }
4308
4309 return true;
4310
4311undef:
4312 /* If we get here then some access check did not pass */
4313 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4314 return false;
4315}
4316
/* Translate MSR (banked): write general register rn into the banked
 * register selected by (r, sysm).  Emits an UNDEF and returns early if
 * the decode rejects the access.
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    /* CPU state may have changed; end the TB.  */
    s->is_jmp = DISAS_UPDATE;
}
4338
/* Translate MRS (banked): read the banked register selected by (r, sysm)
 * into general register rn.  Emits an UNDEF and returns early if the
 * decode rejects the access.
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    /* CPU state may have changed; end the TB.  */
    s->is_jmp = DISAS_UPDATE;
}
4360
/* Generate an old-style exception return. Marks pc as dead.
 * Writes pc to r15 and restores the CPSR from the current SPSR.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    tmp = load_cpu_field(spsr);
    gen_helper_cpsr_write_eret(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Mode change: end the TB with an indirect jump.  */
    s->is_jmp = DISAS_JUMP;
}
4371
/* Generate a v6 exception return.  Marks both values as dead.
 * Used by RFE: 'cpsr' is the value to restore, 'pc' the return address.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    /* Mode change: end the TB with an indirect jump.  */
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4380
9ee6e8bb
PB
/* Translate the NOP-compatible hint space (NOP/YIELD/WFE/WFI/SEV/SEVL).
 * YIELD/WFE/WFI end the TB so the main loop can act on the hint.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
    case 1: /* yield */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_YIELD;
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        gen_set_pc_im(s, s->pc);
        s->is_jmp = DISAS_WFE;
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV and SEVL (currently NOPs); may help SMP
         * performance.
         */
    default: /* nop */
        break;
    }
}
99c475ab 4403
/* Shorthand argument list for two-operand ops on the 64-bit Neon temps:
 * destination cpu_V0, sources cpu_V0 and cpu_V1.
 */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4405
39d5492a 4406static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4407{
4408 switch (size) {
dd8fbd78
FN
4409 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4410 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4411 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4412 default: abort();
9ee6e8bb 4413 }
9ee6e8bb
PB
4414}
4415
39d5492a 4416static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4417{
4418 switch (size) {
dd8fbd78
FN
4419 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4420 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4421 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4422 default: return;
4423 }
4424}
4425
/* 32-bit pairwise ops end up the same as the elementwise versions, since
 * a 32-bit "pair" within a single register is just the one element.
 */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
4431
ad69471c
PB
/* Dispatch tmp = op(tmp, tmp2) on (size, u): the selector is
 * (size << 1) | u, giving s8/u8/s16/u16/s32/u32 variants of helpers that
 * also take cpu_env (ops which may set QC or raise exceptions).
 * Expands to 'return 1' (insn invalid) on an unhandled combination.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4454
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not need cpu_env:
 * tmp = op(tmp, tmp2), variant selected by (size << 1) | u.
 * Expands to 'return 1' (insn invalid) on an unhandled combination.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4477
39d5492a 4478static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4479{
39d5492a 4480 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4481 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4482 return tmp;
9ee6e8bb
PB
4483}
4484
/* Store 'var' into the indexed vfp.scratch slot; consumes (frees) var.  */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4490
/* Fetch the scalar operand for a by-scalar Neon op and replicate it across
 * the 32-bit temp.  'reg' encodes both the D register and the element
 * index; for 16-bit elements bit 3 selects the high or low halfword.
 * Caller owns (and must free) the returned temporary.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        /* 32-bit element: the word itself is already "replicated".  */
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
4506
/* Translate VUZP (unzip) for registers rd/rm.  'q' selects the
 * quadword form.  Returns nonzero for the invalid doubleword 32-bit
 * case (handled elsewhere as VTRN).
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* Helpers take the register numbers as immediates via const temps.  */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4545
/* Translate VZIP (zip/interleave) for registers rd/rm.  'q' selects the
 * quadword form.  Returns nonzero for the invalid doubleword 32-bit
 * case (handled elsewhere as VTRN).
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* Helpers take the register numbers as immediates via const temps.  */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4584
/* One step of VTRN.8: exchange the odd bytes of t0 with the even bytes
 * of t1, in place.  (t0 gets t1's even bytes in its odd lanes and keeps
 * its own even lanes; symmetrically for t1.)
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* Build new t0: t0's even bytes shifted up, merged with t1's even bytes. */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* Build new t1: t1's odd bytes shifted down, merged with t0's odd bytes. */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4606
/* One step of VTRN.16: exchange the high halfword of t0 with the low
 * halfword of t1, in place.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* New t0 = (t0 low << 16) | t1 low.  */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* New t1 = (t1 high >> 16) | t0 high.  */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4625
4626
9ee6e8bb
PB
/* Per-op parameters for the Neon "load/store multiple structures"
 * encodings, indexed by the insn's op field (0..10):
 *   nregs      - number of D registers transferred
 *   interleave - element interleave factor between registers
 *   spacing    - register-number spacing within a structure
 * (Values mirror the ARM ARM table for VLDn/VSTn multiple-structure
 * forms — confirm against DDI0406 A7 when editing.)
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4644
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Covers three forms: load/store multiple structures (bit 23 clear),
   load-single-element-to-all-lanes (VLD[1-4] "dup"), and single element
   to/from one lane.  Also handles post-indexed writeback when rm != 15. */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved structures restart from the base (plus an
             * element offset) for each register rather than running on.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one load/store per D register.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: two 32-bit passes per D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one word.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses packed into one word.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into its lane, keeping
                         * the other lanes of the register unchanged.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Post-indexed writeback: rm==13 means immediate (transfer size),
         * otherwise add register rm to the base.
         */
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4973
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.
 * Implements VBSL-style per-bit selection: bits set in c pick from t,
 * clear bits pick from f.
 */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4981
/* Narrow a 64-bit source to 32 bits of packed elements of the given size,
 * truncating each element (non-saturating VMOVN-style).
 */
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
}
4991
/* Signed saturating narrow (VQMOVN.S); may set QC in cpu_env.  */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
5001
/* Unsigned saturating narrow (VQMOVN.U); may set QC in cpu_env.  */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
5011
/* Signed-to-unsigned saturating narrow (VQMOVUN); may set QC in cpu_env.  */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
5021
/* Per-element shift used on the way into a narrowing op: 'q' selects
 * rounding shifts, 'u' unsigned vs signed.  Element size 1 or 2 only
 * (byte elements never reach here).
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
5055
/* Widen 32 bits of packed elements into a 64-bit destination,
 * zero-extending if u else sign-extending.  Consumes (frees) src.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
5075
/* Long add on the implicit 64-bit temps: cpu_V0 = cpu_V0 + cpu_V1,
 * per-element width selected by 'size' (see CPU_V001).
 */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
5085
/* Long subtract on the implicit 64-bit temps: cpu_V0 = cpu_V0 - cpu_V1,
 * per-element width selected by 'size' (see CPU_V001).
 */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
5095
/* Per-element negate of a 64-bit value, width selected by 'size'.  */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
5107
/* Saturating long add, op0 = sat(op0 + op1); may set QC in cpu_env.
 * Only 32- and 64-bit element widths exist for this op.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
5116
39d5492a
PM
/* Widening multiply: dest(64) = a(32) * b(32), dispatched on the
 * combined key (size << 1) | u, where 'u' selects unsigned and 'size'
 * the input element width.  'a' and 'b' are consumed: freed explicitly
 * below for sizes 0/1; for size 2 they are passed to
 * gen_mul[su]_i64_i32 (presumably freed there -- not visible in this
 * file chunk, confirm against those helpers).
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4: /* size 2, signed */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5: /* size 2, unsigned */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
5147
39d5492a
PM
5148static void gen_neon_narrow_op(int op, int u, int size,
5149 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5150{
5151 if (op) {
5152 if (u) {
5153 gen_neon_unarrow_sats(size, dest, src);
5154 } else {
5155 gen_neon_narrow(size, dest, src);
5156 }
5157 } else {
5158 if (u) {
5159 gen_neon_narrow_satu(size, dest, src);
5160 } else {
5161 gen_neon_narrow_sats(size, dest, src);
5162 }
5163 }
5164}
5165
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n of each entry is set if size value n is valid for that op;
 * the 3-reg-same decoder UNDEFs when the bit is clear.  Some entries
 * overload the size field as an op selector (noted per entry).
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5237
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering (e.g. 3, 29) are unallocated
 * encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5304
5305static int neon_2rm_is_float_op(int op)
5306{
5307 /* Return true if this neon 2reg-misc op is float-to-float */
5308 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5309 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5310 op == NEON_2RM_VRINTM ||
5311 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5312 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5313}
5314
fe8fcf3d
PM
/* Return true if this neon 2reg-misc op is ARMv8 and up: the VRINT*
 * and VCVT[ANPM]{U,S} encodings, which need an ARMv8 feature check in
 * the decoder.  All other ops fall through to false.
 */
static bool neon_2rm_is_v8_op(int op)
{
    switch (op) {
    case NEON_2RM_VRINTN:
    case NEON_2RM_VRINTA:
    case NEON_2RM_VRINTM:
    case NEON_2RM_VRINTP:
    case NEON_2RM_VRINTZ:
    case NEON_2RM_VRINTX:
    case NEON_2RM_VCVTAU:
    case NEON_2RM_VCVTAS:
    case NEON_2RM_VCVTNU:
    case NEON_2RM_VCVTNS:
    case NEON_2RM_VCVTPU:
    case NEON_2RM_VCVTPS:
    case NEON_2RM_VCVTMU:
    case NEON_2RM_VCVTMS:
        return true;
    default:
        return false;
    }
}
5338
600b828c
PM
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5407
9ee6e8bb
PB
5408/* Translate a NEON data processing instruction. Return nonzero if the
5409 instruction is invalid.
ad69471c
PB
5410 We process data in a mixture of 32-bit and 64-bit chunks.
5411 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5412
7dcc1f89 5413static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5414{
5415 int op;
5416 int q;
5417 int rd, rn, rm;
5418 int size;
5419 int shift;
5420 int pass;
5421 int count;
5422 int pairwise;
5423 int u;
ca9a32e4 5424 uint32_t imm, mask;
39d5492a 5425 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5426 TCGv_i64 tmp64;
9ee6e8bb 5427
2c7ffc41
PM
5428 /* FIXME: this access check should not take precedence over UNDEF
5429 * for invalid encodings; we will generate incorrect syndrome information
5430 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5431 */
9dbbc748 5432 if (s->fp_excp_el) {
2c7ffc41 5433 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5434 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5435 return 0;
5436 }
5437
5df8bac1 5438 if (!s->vfp_enabled)
9ee6e8bb
PB
5439 return 1;
5440 q = (insn & (1 << 6)) != 0;
5441 u = (insn >> 24) & 1;
5442 VFP_DREG_D(rd, insn);
5443 VFP_DREG_N(rn, insn);
5444 VFP_DREG_M(rm, insn);
5445 size = (insn >> 20) & 3;
5446 if ((insn & (1 << 23)) == 0) {
5447 /* Three register same length. */
5448 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5449 /* Catch invalid op and bad size combinations: UNDEF */
5450 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5451 return 1;
5452 }
25f84f79
PM
5453 /* All insns of this form UNDEF for either this condition or the
5454 * superset of cases "Q==1"; we catch the latter later.
5455 */
5456 if (q && ((rd | rn | rm) & 1)) {
5457 return 1;
5458 }
f1ecb913
AB
5459 /*
5460 * The SHA-1/SHA-256 3-register instructions require special treatment
5461 * here, as their size field is overloaded as an op type selector, and
5462 * they all consume their input in a single pass.
5463 */
5464 if (op == NEON_3R_SHA) {
5465 if (!q) {
5466 return 1;
5467 }
5468 if (!u) { /* SHA-1 */
d614a513 5469 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5470 return 1;
5471 }
5472 tmp = tcg_const_i32(rd);
5473 tmp2 = tcg_const_i32(rn);
5474 tmp3 = tcg_const_i32(rm);
5475 tmp4 = tcg_const_i32(size);
5476 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5477 tcg_temp_free_i32(tmp4);
5478 } else { /* SHA-256 */
d614a513 5479 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5480 return 1;
5481 }
5482 tmp = tcg_const_i32(rd);
5483 tmp2 = tcg_const_i32(rn);
5484 tmp3 = tcg_const_i32(rm);
5485 switch (size) {
5486 case 0:
5487 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5488 break;
5489 case 1:
5490 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5491 break;
5492 case 2:
5493 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5494 break;
5495 }
5496 }
5497 tcg_temp_free_i32(tmp);
5498 tcg_temp_free_i32(tmp2);
5499 tcg_temp_free_i32(tmp3);
5500 return 0;
5501 }
62698be3
PM
5502 if (size == 3 && op != NEON_3R_LOGIC) {
5503 /* 64-bit element instructions. */
9ee6e8bb 5504 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5505 neon_load_reg64(cpu_V0, rn + pass);
5506 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5507 switch (op) {
62698be3 5508 case NEON_3R_VQADD:
9ee6e8bb 5509 if (u) {
02da0b2d
PM
5510 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5511 cpu_V0, cpu_V1);
2c0262af 5512 } else {
02da0b2d
PM
5513 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5514 cpu_V0, cpu_V1);
2c0262af 5515 }
9ee6e8bb 5516 break;
62698be3 5517 case NEON_3R_VQSUB:
9ee6e8bb 5518 if (u) {
02da0b2d
PM
5519 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5520 cpu_V0, cpu_V1);
ad69471c 5521 } else {
02da0b2d
PM
5522 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5523 cpu_V0, cpu_V1);
ad69471c
PB
5524 }
5525 break;
62698be3 5526 case NEON_3R_VSHL:
ad69471c
PB
5527 if (u) {
5528 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5529 } else {
5530 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5531 }
5532 break;
62698be3 5533 case NEON_3R_VQSHL:
ad69471c 5534 if (u) {
02da0b2d
PM
5535 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5536 cpu_V1, cpu_V0);
ad69471c 5537 } else {
02da0b2d
PM
5538 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5539 cpu_V1, cpu_V0);
ad69471c
PB
5540 }
5541 break;
62698be3 5542 case NEON_3R_VRSHL:
ad69471c
PB
5543 if (u) {
5544 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5545 } else {
ad69471c
PB
5546 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5547 }
5548 break;
62698be3 5549 case NEON_3R_VQRSHL:
ad69471c 5550 if (u) {
02da0b2d
PM
5551 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5552 cpu_V1, cpu_V0);
ad69471c 5553 } else {
02da0b2d
PM
5554 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5555 cpu_V1, cpu_V0);
1e8d4eec 5556 }
9ee6e8bb 5557 break;
62698be3 5558 case NEON_3R_VADD_VSUB:
9ee6e8bb 5559 if (u) {
ad69471c 5560 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5561 } else {
ad69471c 5562 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5563 }
5564 break;
5565 default:
5566 abort();
2c0262af 5567 }
ad69471c 5568 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5569 }
9ee6e8bb 5570 return 0;
2c0262af 5571 }
25f84f79 5572 pairwise = 0;
9ee6e8bb 5573 switch (op) {
62698be3
PM
5574 case NEON_3R_VSHL:
5575 case NEON_3R_VQSHL:
5576 case NEON_3R_VRSHL:
5577 case NEON_3R_VQRSHL:
9ee6e8bb 5578 {
ad69471c
PB
5579 int rtmp;
5580 /* Shift instruction operands are reversed. */
5581 rtmp = rn;
9ee6e8bb 5582 rn = rm;
ad69471c 5583 rm = rtmp;
9ee6e8bb 5584 }
2c0262af 5585 break;
25f84f79
PM
5586 case NEON_3R_VPADD:
5587 if (u) {
5588 return 1;
5589 }
5590 /* Fall through */
62698be3
PM
5591 case NEON_3R_VPMAX:
5592 case NEON_3R_VPMIN:
9ee6e8bb 5593 pairwise = 1;
2c0262af 5594 break;
25f84f79
PM
5595 case NEON_3R_FLOAT_ARITH:
5596 pairwise = (u && size < 2); /* if VPADD (float) */
5597 break;
5598 case NEON_3R_FLOAT_MINMAX:
5599 pairwise = u; /* if VPMIN/VPMAX (float) */
5600 break;
5601 case NEON_3R_FLOAT_CMP:
5602 if (!u && size) {
5603 /* no encoding for U=0 C=1x */
5604 return 1;
5605 }
5606 break;
5607 case NEON_3R_FLOAT_ACMP:
5608 if (!u) {
5609 return 1;
5610 }
5611 break;
505935fc
WN
5612 case NEON_3R_FLOAT_MISC:
5613 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5614 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5615 return 1;
5616 }
2c0262af 5617 break;
25f84f79
PM
5618 case NEON_3R_VMUL:
5619 if (u && (size != 0)) {
5620 /* UNDEF on invalid size for polynomial subcase */
5621 return 1;
5622 }
2c0262af 5623 break;
da97f52c 5624 case NEON_3R_VFM:
d614a513 5625 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5626 return 1;
5627 }
5628 break;
9ee6e8bb 5629 default:
2c0262af 5630 break;
9ee6e8bb 5631 }
dd8fbd78 5632
25f84f79
PM
5633 if (pairwise && q) {
5634 /* All the pairwise insns UNDEF if Q is set */
5635 return 1;
5636 }
5637
9ee6e8bb
PB
5638 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5639
5640 if (pairwise) {
5641 /* Pairwise. */
a5a14945
JR
5642 if (pass < 1) {
5643 tmp = neon_load_reg(rn, 0);
5644 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5645 } else {
a5a14945
JR
5646 tmp = neon_load_reg(rm, 0);
5647 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5648 }
5649 } else {
5650 /* Elementwise. */
dd8fbd78
FN
5651 tmp = neon_load_reg(rn, pass);
5652 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5653 }
5654 switch (op) {
62698be3 5655 case NEON_3R_VHADD:
9ee6e8bb
PB
5656 GEN_NEON_INTEGER_OP(hadd);
5657 break;
62698be3 5658 case NEON_3R_VQADD:
02da0b2d 5659 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5660 break;
62698be3 5661 case NEON_3R_VRHADD:
9ee6e8bb 5662 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5663 break;
62698be3 5664 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5665 switch ((u << 2) | size) {
5666 case 0: /* VAND */
dd8fbd78 5667 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5668 break;
5669 case 1: /* BIC */
f669df27 5670 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5671 break;
5672 case 2: /* VORR */
dd8fbd78 5673 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5674 break;
5675 case 3: /* VORN */
f669df27 5676 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5677 break;
5678 case 4: /* VEOR */
dd8fbd78 5679 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5680 break;
5681 case 5: /* VBSL */
dd8fbd78
FN
5682 tmp3 = neon_load_reg(rd, pass);
5683 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5684 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5685 break;
5686 case 6: /* VBIT */
dd8fbd78
FN
5687 tmp3 = neon_load_reg(rd, pass);
5688 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5689 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5690 break;
5691 case 7: /* VBIF */
dd8fbd78
FN
5692 tmp3 = neon_load_reg(rd, pass);
5693 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5694 tcg_temp_free_i32(tmp3);
9ee6e8bb 5695 break;
2c0262af
FB
5696 }
5697 break;
62698be3 5698 case NEON_3R_VHSUB:
9ee6e8bb
PB
5699 GEN_NEON_INTEGER_OP(hsub);
5700 break;
62698be3 5701 case NEON_3R_VQSUB:
02da0b2d 5702 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5703 break;
62698be3 5704 case NEON_3R_VCGT:
9ee6e8bb
PB
5705 GEN_NEON_INTEGER_OP(cgt);
5706 break;
62698be3 5707 case NEON_3R_VCGE:
9ee6e8bb
PB
5708 GEN_NEON_INTEGER_OP(cge);
5709 break;
62698be3 5710 case NEON_3R_VSHL:
ad69471c 5711 GEN_NEON_INTEGER_OP(shl);
2c0262af 5712 break;
62698be3 5713 case NEON_3R_VQSHL:
02da0b2d 5714 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5715 break;
62698be3 5716 case NEON_3R_VRSHL:
ad69471c 5717 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5718 break;
62698be3 5719 case NEON_3R_VQRSHL:
02da0b2d 5720 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5721 break;
62698be3 5722 case NEON_3R_VMAX:
9ee6e8bb
PB
5723 GEN_NEON_INTEGER_OP(max);
5724 break;
62698be3 5725 case NEON_3R_VMIN:
9ee6e8bb
PB
5726 GEN_NEON_INTEGER_OP(min);
5727 break;
62698be3 5728 case NEON_3R_VABD:
9ee6e8bb
PB
5729 GEN_NEON_INTEGER_OP(abd);
5730 break;
62698be3 5731 case NEON_3R_VABA:
9ee6e8bb 5732 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5733 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5734 tmp2 = neon_load_reg(rd, pass);
5735 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5736 break;
62698be3 5737 case NEON_3R_VADD_VSUB:
9ee6e8bb 5738 if (!u) { /* VADD */
62698be3 5739 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5740 } else { /* VSUB */
5741 switch (size) {
dd8fbd78
FN
5742 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5743 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5744 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5745 default: abort();
9ee6e8bb
PB
5746 }
5747 }
5748 break;
62698be3 5749 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5750 if (!u) { /* VTST */
5751 switch (size) {
dd8fbd78
FN
5752 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5753 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5754 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5755 default: abort();
9ee6e8bb
PB
5756 }
5757 } else { /* VCEQ */
5758 switch (size) {
dd8fbd78
FN
5759 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5760 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5761 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5762 default: abort();
9ee6e8bb
PB
5763 }
5764 }
5765 break;
62698be3 5766 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5767 switch (size) {
dd8fbd78
FN
5768 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5769 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5770 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5771 default: abort();
9ee6e8bb 5772 }
7d1b0095 5773 tcg_temp_free_i32(tmp2);
dd8fbd78 5774 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5775 if (u) { /* VMLS */
dd8fbd78 5776 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5777 } else { /* VMLA */
dd8fbd78 5778 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5779 }
5780 break;
62698be3 5781 case NEON_3R_VMUL:
9ee6e8bb 5782 if (u) { /* polynomial */
dd8fbd78 5783 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5784 } else { /* Integer */
5785 switch (size) {
dd8fbd78
FN
5786 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5787 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5788 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5789 default: abort();
9ee6e8bb
PB
5790 }
5791 }
5792 break;
62698be3 5793 case NEON_3R_VPMAX:
9ee6e8bb
PB
5794 GEN_NEON_INTEGER_OP(pmax);
5795 break;
62698be3 5796 case NEON_3R_VPMIN:
9ee6e8bb
PB
5797 GEN_NEON_INTEGER_OP(pmin);
5798 break;
62698be3 5799 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5800 if (!u) { /* VQDMULH */
5801 switch (size) {
02da0b2d
PM
5802 case 1:
5803 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5804 break;
5805 case 2:
5806 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5807 break;
62698be3 5808 default: abort();
9ee6e8bb 5809 }
62698be3 5810 } else { /* VQRDMULH */
9ee6e8bb 5811 switch (size) {
02da0b2d
PM
5812 case 1:
5813 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5814 break;
5815 case 2:
5816 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5817 break;
62698be3 5818 default: abort();
9ee6e8bb
PB
5819 }
5820 }
5821 break;
62698be3 5822 case NEON_3R_VPADD:
9ee6e8bb 5823 switch (size) {
dd8fbd78
FN
5824 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5825 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5826 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5827 default: abort();
9ee6e8bb
PB
5828 }
5829 break;
62698be3 5830 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5831 {
5832 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5833 switch ((u << 2) | size) {
5834 case 0: /* VADD */
aa47cfdd
PM
5835 case 4: /* VPADD */
5836 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5837 break;
5838 case 2: /* VSUB */
aa47cfdd 5839 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5840 break;
5841 case 6: /* VABD */
aa47cfdd 5842 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5843 break;
5844 default:
62698be3 5845 abort();
9ee6e8bb 5846 }
aa47cfdd 5847 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5848 break;
aa47cfdd 5849 }
62698be3 5850 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5851 {
5852 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5853 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5854 if (!u) {
7d1b0095 5855 tcg_temp_free_i32(tmp2);
dd8fbd78 5856 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5857 if (size == 0) {
aa47cfdd 5858 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5859 } else {
aa47cfdd 5860 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5861 }
5862 }
aa47cfdd 5863 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5864 break;
aa47cfdd 5865 }
62698be3 5866 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5867 {
5868 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5869 if (!u) {
aa47cfdd 5870 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5871 } else {
aa47cfdd
PM
5872 if (size == 0) {
5873 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5874 } else {
5875 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5876 }
b5ff1b31 5877 }
aa47cfdd 5878 tcg_temp_free_ptr(fpstatus);
2c0262af 5879 break;
aa47cfdd 5880 }
62698be3 5881 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5882 {
5883 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5884 if (size == 0) {
5885 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5886 } else {
5887 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5888 }
5889 tcg_temp_free_ptr(fpstatus);
2c0262af 5890 break;
aa47cfdd 5891 }
62698be3 5892 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5893 {
5894 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5895 if (size == 0) {
f71a2ae5 5896 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5897 } else {
f71a2ae5 5898 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5899 }
5900 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5901 break;
aa47cfdd 5902 }
505935fc
WN
5903 case NEON_3R_FLOAT_MISC:
5904 if (u) {
5905 /* VMAXNM/VMINNM */
5906 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5907 if (size == 0) {
f71a2ae5 5908 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5909 } else {
f71a2ae5 5910 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5911 }
5912 tcg_temp_free_ptr(fpstatus);
5913 } else {
5914 if (size == 0) {
5915 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5916 } else {
5917 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5918 }
5919 }
2c0262af 5920 break;
da97f52c
PM
5921 case NEON_3R_VFM:
5922 {
5923 /* VFMA, VFMS: fused multiply-add */
5924 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5925 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5926 if (size) {
5927 /* VFMS */
5928 gen_helper_vfp_negs(tmp, tmp);
5929 }
5930 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5931 tcg_temp_free_i32(tmp3);
5932 tcg_temp_free_ptr(fpstatus);
5933 break;
5934 }
9ee6e8bb
PB
5935 default:
5936 abort();
2c0262af 5937 }
7d1b0095 5938 tcg_temp_free_i32(tmp2);
dd8fbd78 5939
9ee6e8bb
PB
5940 /* Save the result. For elementwise operations we can put it
5941 straight into the destination register. For pairwise operations
5942 we have to be careful to avoid clobbering the source operands. */
5943 if (pairwise && rd == rm) {
dd8fbd78 5944 neon_store_scratch(pass, tmp);
9ee6e8bb 5945 } else {
dd8fbd78 5946 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5947 }
5948
5949 } /* for pass */
5950 if (pairwise && rd == rm) {
5951 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5952 tmp = neon_load_scratch(pass);
5953 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5954 }
5955 }
ad69471c 5956 /* End of 3 register same size operations. */
9ee6e8bb
PB
5957 } else if (insn & (1 << 4)) {
5958 if ((insn & 0x00380080) != 0) {
5959 /* Two registers and shift. */
5960 op = (insn >> 8) & 0xf;
5961 if (insn & (1 << 7)) {
cc13115b
PM
5962 /* 64-bit shift. */
5963 if (op > 7) {
5964 return 1;
5965 }
9ee6e8bb
PB
5966 size = 3;
5967 } else {
5968 size = 2;
5969 while ((insn & (1 << (size + 19))) == 0)
5970 size--;
5971 }
5972 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5973 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5974 by immediate using the variable shift operations. */
5975 if (op < 8) {
5976 /* Shift by immediate:
5977 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5978 if (q && ((rd | rm) & 1)) {
5979 return 1;
5980 }
5981 if (!u && (op == 4 || op == 6)) {
5982 return 1;
5983 }
9ee6e8bb
PB
5984 /* Right shifts are encoded as N - shift, where N is the
5985 element size in bits. */
5986 if (op <= 4)
5987 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5988 if (size == 3) {
5989 count = q + 1;
5990 } else {
5991 count = q ? 4: 2;
5992 }
5993 switch (size) {
5994 case 0:
5995 imm = (uint8_t) shift;
5996 imm |= imm << 8;
5997 imm |= imm << 16;
5998 break;
5999 case 1:
6000 imm = (uint16_t) shift;
6001 imm |= imm << 16;
6002 break;
6003 case 2:
6004 case 3:
6005 imm = shift;
6006 break;
6007 default:
6008 abort();
6009 }
6010
6011 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6012 if (size == 3) {
6013 neon_load_reg64(cpu_V0, rm + pass);
6014 tcg_gen_movi_i64(cpu_V1, imm);
6015 switch (op) {
6016 case 0: /* VSHR */
6017 case 1: /* VSRA */
6018 if (u)
6019 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6020 else
ad69471c 6021 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6022 break;
ad69471c
PB
6023 case 2: /* VRSHR */
6024 case 3: /* VRSRA */
6025 if (u)
6026 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6027 else
ad69471c 6028 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6029 break;
ad69471c 6030 case 4: /* VSRI */
ad69471c
PB
6031 case 5: /* VSHL, VSLI */
6032 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6033 break;
0322b26e 6034 case 6: /* VQSHLU */
02da0b2d
PM
6035 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6036 cpu_V0, cpu_V1);
ad69471c 6037 break;
0322b26e
PM
6038 case 7: /* VQSHL */
6039 if (u) {
02da0b2d 6040 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6041 cpu_V0, cpu_V1);
6042 } else {
02da0b2d 6043 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6044 cpu_V0, cpu_V1);
6045 }
9ee6e8bb 6046 break;
9ee6e8bb 6047 }
ad69471c
PB
6048 if (op == 1 || op == 3) {
6049 /* Accumulate. */
5371cb81 6050 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6051 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6052 } else if (op == 4 || (op == 5 && u)) {
6053 /* Insert */
923e6509
CL
6054 neon_load_reg64(cpu_V1, rd + pass);
6055 uint64_t mask;
6056 if (shift < -63 || shift > 63) {
6057 mask = 0;
6058 } else {
6059 if (op == 4) {
6060 mask = 0xffffffffffffffffull >> -shift;
6061 } else {
6062 mask = 0xffffffffffffffffull << shift;
6063 }
6064 }
6065 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6066 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6067 }
6068 neon_store_reg64(cpu_V0, rd + pass);
6069 } else { /* size < 3 */
6070 /* Operands in T0 and T1. */
dd8fbd78 6071 tmp = neon_load_reg(rm, pass);
7d1b0095 6072 tmp2 = tcg_temp_new_i32();
dd8fbd78 6073 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6074 switch (op) {
6075 case 0: /* VSHR */
6076 case 1: /* VSRA */
6077 GEN_NEON_INTEGER_OP(shl);
6078 break;
6079 case 2: /* VRSHR */
6080 case 3: /* VRSRA */
6081 GEN_NEON_INTEGER_OP(rshl);
6082 break;
6083 case 4: /* VSRI */
ad69471c
PB
6084 case 5: /* VSHL, VSLI */
6085 switch (size) {
dd8fbd78
FN
6086 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6087 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6088 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6089 default: abort();
ad69471c
PB
6090 }
6091 break;
0322b26e 6092 case 6: /* VQSHLU */
ad69471c 6093 switch (size) {
0322b26e 6094 case 0:
02da0b2d
PM
6095 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6096 tmp, tmp2);
0322b26e
PM
6097 break;
6098 case 1:
02da0b2d
PM
6099 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6100 tmp, tmp2);
0322b26e
PM
6101 break;
6102 case 2:
02da0b2d
PM
6103 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6104 tmp, tmp2);
0322b26e
PM
6105 break;
6106 default:
cc13115b 6107 abort();
ad69471c
PB
6108 }
6109 break;
0322b26e 6110 case 7: /* VQSHL */
02da0b2d 6111 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6112 break;
ad69471c 6113 }
7d1b0095 6114 tcg_temp_free_i32(tmp2);
ad69471c
PB
6115
6116 if (op == 1 || op == 3) {
6117 /* Accumulate. */
dd8fbd78 6118 tmp2 = neon_load_reg(rd, pass);
5371cb81 6119 gen_neon_add(size, tmp, tmp2);
7d1b0095 6120 tcg_temp_free_i32(tmp2);
ad69471c
PB
6121 } else if (op == 4 || (op == 5 && u)) {
6122 /* Insert */
6123 switch (size) {
6124 case 0:
6125 if (op == 4)
ca9a32e4 6126 mask = 0xff >> -shift;
ad69471c 6127 else
ca9a32e4
JR
6128 mask = (uint8_t)(0xff << shift);
6129 mask |= mask << 8;
6130 mask |= mask << 16;
ad69471c
PB
6131 break;
6132 case 1:
6133 if (op == 4)
ca9a32e4 6134 mask = 0xffff >> -shift;
ad69471c 6135 else
ca9a32e4
JR
6136 mask = (uint16_t)(0xffff << shift);
6137 mask |= mask << 16;
ad69471c
PB
6138 break;
6139 case 2:
ca9a32e4
JR
6140 if (shift < -31 || shift > 31) {
6141 mask = 0;
6142 } else {
6143 if (op == 4)
6144 mask = 0xffffffffu >> -shift;
6145 else
6146 mask = 0xffffffffu << shift;
6147 }
ad69471c
PB
6148 break;
6149 default:
6150 abort();
6151 }
dd8fbd78 6152 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6153 tcg_gen_andi_i32(tmp, tmp, mask);
6154 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6155 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6156 tcg_temp_free_i32(tmp2);
ad69471c 6157 }
dd8fbd78 6158 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6159 }
6160 } /* for pass */
6161 } else if (op < 10) {
ad69471c 6162 /* Shift by immediate and narrow:
9ee6e8bb 6163 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6164 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6165 if (rm & 1) {
6166 return 1;
6167 }
9ee6e8bb
PB
6168 shift = shift - (1 << (size + 3));
6169 size++;
92cdfaeb 6170 if (size == 3) {
a7812ae4 6171 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6172 neon_load_reg64(cpu_V0, rm);
6173 neon_load_reg64(cpu_V1, rm + 1);
6174 for (pass = 0; pass < 2; pass++) {
6175 TCGv_i64 in;
6176 if (pass == 0) {
6177 in = cpu_V0;
6178 } else {
6179 in = cpu_V1;
6180 }
ad69471c 6181 if (q) {
0b36f4cd 6182 if (input_unsigned) {
92cdfaeb 6183 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6184 } else {
92cdfaeb 6185 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6186 }
ad69471c 6187 } else {
0b36f4cd 6188 if (input_unsigned) {
92cdfaeb 6189 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6190 } else {
92cdfaeb 6191 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6192 }
ad69471c 6193 }
7d1b0095 6194 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6195 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6196 neon_store_reg(rd, pass, tmp);
6197 } /* for pass */
6198 tcg_temp_free_i64(tmp64);
6199 } else {
6200 if (size == 1) {
6201 imm = (uint16_t)shift;
6202 imm |= imm << 16;
2c0262af 6203 } else {
92cdfaeb
PM
6204 /* size == 2 */
6205 imm = (uint32_t)shift;
6206 }
6207 tmp2 = tcg_const_i32(imm);
6208 tmp4 = neon_load_reg(rm + 1, 0);
6209 tmp5 = neon_load_reg(rm + 1, 1);
6210 for (pass = 0; pass < 2; pass++) {
6211 if (pass == 0) {
6212 tmp = neon_load_reg(rm, 0);
6213 } else {
6214 tmp = tmp4;
6215 }
0b36f4cd
CL
6216 gen_neon_shift_narrow(size, tmp, tmp2, q,
6217 input_unsigned);
92cdfaeb
PM
6218 if (pass == 0) {
6219 tmp3 = neon_load_reg(rm, 1);
6220 } else {
6221 tmp3 = tmp5;
6222 }
0b36f4cd
CL
6223 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6224 input_unsigned);
36aa55dc 6225 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6226 tcg_temp_free_i32(tmp);
6227 tcg_temp_free_i32(tmp3);
6228 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6229 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6230 neon_store_reg(rd, pass, tmp);
6231 } /* for pass */
c6067f04 6232 tcg_temp_free_i32(tmp2);
b75263d6 6233 }
9ee6e8bb 6234 } else if (op == 10) {
cc13115b
PM
6235 /* VSHLL, VMOVL */
6236 if (q || (rd & 1)) {
9ee6e8bb 6237 return 1;
cc13115b 6238 }
ad69471c
PB
6239 tmp = neon_load_reg(rm, 0);
6240 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6241 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6242 if (pass == 1)
6243 tmp = tmp2;
6244
6245 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6246
9ee6e8bb
PB
6247 if (shift != 0) {
6248 /* The shift is less than the width of the source
ad69471c
PB
6249 type, so we can just shift the whole register. */
6250 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6251 /* Widen the result of shift: we need to clear
6252 * the potential overflow bits resulting from
6253 * left bits of the narrow input appearing as
6254 * right bits of left the neighbour narrow
6255 * input. */
ad69471c
PB
6256 if (size < 2 || !u) {
6257 uint64_t imm64;
6258 if (size == 0) {
6259 imm = (0xffu >> (8 - shift));
6260 imm |= imm << 16;
acdf01ef 6261 } else if (size == 1) {
ad69471c 6262 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6263 } else {
6264 /* size == 2 */
6265 imm = 0xffffffff >> (32 - shift);
6266 }
6267 if (size < 2) {
6268 imm64 = imm | (((uint64_t)imm) << 32);
6269 } else {
6270 imm64 = imm;
9ee6e8bb 6271 }
acdf01ef 6272 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6273 }
6274 }
ad69471c 6275 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6276 }
f73534a5 6277 } else if (op >= 14) {
9ee6e8bb 6278 /* VCVT fixed-point. */
cc13115b
PM
6279 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6280 return 1;
6281 }
f73534a5
PM
6282 /* We have already masked out the must-be-1 top bit of imm6,
6283 * hence this 32-shift where the ARM ARM has 64-imm6.
6284 */
6285 shift = 32 - shift;
9ee6e8bb 6286 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6287 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6288 if (!(op & 1)) {
9ee6e8bb 6289 if (u)
5500b06c 6290 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6291 else
5500b06c 6292 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6293 } else {
6294 if (u)
5500b06c 6295 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6296 else
5500b06c 6297 gen_vfp_tosl(0, shift, 1);
2c0262af 6298 }
4373f3ce 6299 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6300 }
6301 } else {
9ee6e8bb
PB
6302 return 1;
6303 }
6304 } else { /* (insn & 0x00380080) == 0 */
6305 int invert;
7d80fee5
PM
6306 if (q && (rd & 1)) {
6307 return 1;
6308 }
9ee6e8bb
PB
6309
6310 op = (insn >> 8) & 0xf;
6311 /* One register and immediate. */
6312 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6313 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6314 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6315 * We choose to not special-case this and will behave as if a
6316 * valid constant encoding of 0 had been given.
6317 */
9ee6e8bb
PB
6318 switch (op) {
6319 case 0: case 1:
6320 /* no-op */
6321 break;
6322 case 2: case 3:
6323 imm <<= 8;
6324 break;
6325 case 4: case 5:
6326 imm <<= 16;
6327 break;
6328 case 6: case 7:
6329 imm <<= 24;
6330 break;
6331 case 8: case 9:
6332 imm |= imm << 16;
6333 break;
6334 case 10: case 11:
6335 imm = (imm << 8) | (imm << 24);
6336 break;
6337 case 12:
8e31209e 6338 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6339 break;
6340 case 13:
6341 imm = (imm << 16) | 0xffff;
6342 break;
6343 case 14:
6344 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6345 if (invert)
6346 imm = ~imm;
6347 break;
6348 case 15:
7d80fee5
PM
6349 if (invert) {
6350 return 1;
6351 }
9ee6e8bb
PB
6352 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6353 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6354 break;
6355 }
6356 if (invert)
6357 imm = ~imm;
6358
9ee6e8bb
PB
6359 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6360 if (op & 1 && op < 12) {
ad69471c 6361 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6362 if (invert) {
6363 /* The immediate value has already been inverted, so
6364 BIC becomes AND. */
ad69471c 6365 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6366 } else {
ad69471c 6367 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6368 }
9ee6e8bb 6369 } else {
ad69471c 6370 /* VMOV, VMVN. */
7d1b0095 6371 tmp = tcg_temp_new_i32();
9ee6e8bb 6372 if (op == 14 && invert) {
a5a14945 6373 int n;
ad69471c
PB
6374 uint32_t val;
6375 val = 0;
9ee6e8bb
PB
6376 for (n = 0; n < 4; n++) {
6377 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6378 val |= 0xff << (n * 8);
9ee6e8bb 6379 }
ad69471c
PB
6380 tcg_gen_movi_i32(tmp, val);
6381 } else {
6382 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6383 }
9ee6e8bb 6384 }
ad69471c 6385 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6386 }
6387 }
e4b3861d 6388 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6389 if (size != 3) {
6390 op = (insn >> 8) & 0xf;
6391 if ((insn & (1 << 6)) == 0) {
6392 /* Three registers of different lengths. */
6393 int src1_wide;
6394 int src2_wide;
6395 int prewiden;
526d0096
PM
6396 /* undefreq: bit 0 : UNDEF if size == 0
6397 * bit 1 : UNDEF if size == 1
6398 * bit 2 : UNDEF if size == 2
6399 * bit 3 : UNDEF if U == 1
6400 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6401 */
6402 int undefreq;
6403 /* prewiden, src1_wide, src2_wide, undefreq */
6404 static const int neon_3reg_wide[16][4] = {
6405 {1, 0, 0, 0}, /* VADDL */
6406 {1, 1, 0, 0}, /* VADDW */
6407 {1, 0, 0, 0}, /* VSUBL */
6408 {1, 1, 0, 0}, /* VSUBW */
6409 {0, 1, 1, 0}, /* VADDHN */
6410 {0, 0, 0, 0}, /* VABAL */
6411 {0, 1, 1, 0}, /* VSUBHN */
6412 {0, 0, 0, 0}, /* VABDL */
6413 {0, 0, 0, 0}, /* VMLAL */
526d0096 6414 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6415 {0, 0, 0, 0}, /* VMLSL */
526d0096 6416 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6417 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6418 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6419 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6420 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6421 };
6422
6423 prewiden = neon_3reg_wide[op][0];
6424 src1_wide = neon_3reg_wide[op][1];
6425 src2_wide = neon_3reg_wide[op][2];
695272dc 6426 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6427
526d0096
PM
6428 if ((undefreq & (1 << size)) ||
6429 ((undefreq & 8) && u)) {
695272dc
PM
6430 return 1;
6431 }
6432 if ((src1_wide && (rn & 1)) ||
6433 (src2_wide && (rm & 1)) ||
6434 (!src2_wide && (rd & 1))) {
ad69471c 6435 return 1;
695272dc 6436 }
ad69471c 6437
4e624eda
PM
6438 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6439 * outside the loop below as it only performs a single pass.
6440 */
6441 if (op == 14 && size == 2) {
6442 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6443
d614a513 6444 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6445 return 1;
6446 }
6447 tcg_rn = tcg_temp_new_i64();
6448 tcg_rm = tcg_temp_new_i64();
6449 tcg_rd = tcg_temp_new_i64();
6450 neon_load_reg64(tcg_rn, rn);
6451 neon_load_reg64(tcg_rm, rm);
6452 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6453 neon_store_reg64(tcg_rd, rd);
6454 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6455 neon_store_reg64(tcg_rd, rd + 1);
6456 tcg_temp_free_i64(tcg_rn);
6457 tcg_temp_free_i64(tcg_rm);
6458 tcg_temp_free_i64(tcg_rd);
6459 return 0;
6460 }
6461
9ee6e8bb
PB
6462 /* Avoid overlapping operands. Wide source operands are
6463 always aligned so will never overlap with wide
6464 destinations in problematic ways. */
8f8e3aa4 6465 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6466 tmp = neon_load_reg(rm, 1);
6467 neon_store_scratch(2, tmp);
8f8e3aa4 6468 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6469 tmp = neon_load_reg(rn, 1);
6470 neon_store_scratch(2, tmp);
9ee6e8bb 6471 }
39d5492a 6472 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6473 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6474 if (src1_wide) {
6475 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6476 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6477 } else {
ad69471c 6478 if (pass == 1 && rd == rn) {
dd8fbd78 6479 tmp = neon_load_scratch(2);
9ee6e8bb 6480 } else {
ad69471c
PB
6481 tmp = neon_load_reg(rn, pass);
6482 }
6483 if (prewiden) {
6484 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6485 }
6486 }
ad69471c
PB
6487 if (src2_wide) {
6488 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6489 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6490 } else {
ad69471c 6491 if (pass == 1 && rd == rm) {
dd8fbd78 6492 tmp2 = neon_load_scratch(2);
9ee6e8bb 6493 } else {
ad69471c
PB
6494 tmp2 = neon_load_reg(rm, pass);
6495 }
6496 if (prewiden) {
6497 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6498 }
9ee6e8bb
PB
6499 }
6500 switch (op) {
6501 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6502 gen_neon_addl(size);
9ee6e8bb 6503 break;
79b0e534 6504 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6505 gen_neon_subl(size);
9ee6e8bb
PB
6506 break;
6507 case 5: case 7: /* VABAL, VABDL */
6508 switch ((size << 1) | u) {
ad69471c
PB
6509 case 0:
6510 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6511 break;
6512 case 1:
6513 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6514 break;
6515 case 2:
6516 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6517 break;
6518 case 3:
6519 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6520 break;
6521 case 4:
6522 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6523 break;
6524 case 5:
6525 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6526 break;
9ee6e8bb
PB
6527 default: abort();
6528 }
7d1b0095
PM
6529 tcg_temp_free_i32(tmp2);
6530 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6531 break;
6532 case 8: case 9: case 10: case 11: case 12: case 13:
6533 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6534 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6535 break;
6536 case 14: /* Polynomial VMULL */
e5ca24cb 6537 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6538 tcg_temp_free_i32(tmp2);
6539 tcg_temp_free_i32(tmp);
e5ca24cb 6540 break;
695272dc
PM
6541 default: /* 15 is RESERVED: caught earlier */
6542 abort();
9ee6e8bb 6543 }
ebcd88ce
PM
6544 if (op == 13) {
6545 /* VQDMULL */
6546 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6547 neon_store_reg64(cpu_V0, rd + pass);
6548 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6549 /* Accumulate. */
ebcd88ce 6550 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6551 switch (op) {
4dc064e6
PM
6552 case 10: /* VMLSL */
6553 gen_neon_negl(cpu_V0, size);
6554 /* Fall through */
6555 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6556 gen_neon_addl(size);
9ee6e8bb
PB
6557 break;
6558 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6559 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6560 if (op == 11) {
6561 gen_neon_negl(cpu_V0, size);
6562 }
ad69471c
PB
6563 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6564 break;
9ee6e8bb
PB
6565 default:
6566 abort();
6567 }
ad69471c 6568 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6569 } else if (op == 4 || op == 6) {
6570 /* Narrowing operation. */
7d1b0095 6571 tmp = tcg_temp_new_i32();
79b0e534 6572 if (!u) {
9ee6e8bb 6573 switch (size) {
ad69471c
PB
6574 case 0:
6575 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6576 break;
6577 case 1:
6578 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6579 break;
6580 case 2:
6581 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6582 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6583 break;
9ee6e8bb
PB
6584 default: abort();
6585 }
6586 } else {
6587 switch (size) {
ad69471c
PB
6588 case 0:
6589 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6590 break;
6591 case 1:
6592 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6593 break;
6594 case 2:
6595 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6596 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6597 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6598 break;
9ee6e8bb
PB
6599 default: abort();
6600 }
6601 }
ad69471c
PB
6602 if (pass == 0) {
6603 tmp3 = tmp;
6604 } else {
6605 neon_store_reg(rd, 0, tmp3);
6606 neon_store_reg(rd, 1, tmp);
6607 }
9ee6e8bb
PB
6608 } else {
6609 /* Write back the result. */
ad69471c 6610 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6611 }
6612 }
6613 } else {
3e3326df
PM
6614 /* Two registers and a scalar. NB that for ops of this form
6615 * the ARM ARM labels bit 24 as Q, but it is in our variable
6616 * 'u', not 'q'.
6617 */
6618 if (size == 0) {
6619 return 1;
6620 }
9ee6e8bb 6621 switch (op) {
9ee6e8bb 6622 case 1: /* Float VMLA scalar */
9ee6e8bb 6623 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6624 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6625 if (size == 1) {
6626 return 1;
6627 }
6628 /* fall through */
6629 case 0: /* Integer VMLA scalar */
6630 case 4: /* Integer VMLS scalar */
6631 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6632 case 12: /* VQDMULH scalar */
6633 case 13: /* VQRDMULH scalar */
3e3326df
PM
6634 if (u && ((rd | rn) & 1)) {
6635 return 1;
6636 }
dd8fbd78
FN
6637 tmp = neon_get_scalar(size, rm);
6638 neon_store_scratch(0, tmp);
9ee6e8bb 6639 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6640 tmp = neon_load_scratch(0);
6641 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6642 if (op == 12) {
6643 if (size == 1) {
02da0b2d 6644 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6645 } else {
02da0b2d 6646 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6647 }
6648 } else if (op == 13) {
6649 if (size == 1) {
02da0b2d 6650 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6651 } else {
02da0b2d 6652 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6653 }
6654 } else if (op & 1) {
aa47cfdd
PM
6655 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6656 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6657 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6658 } else {
6659 switch (size) {
dd8fbd78
FN
6660 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6661 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6662 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6663 default: abort();
9ee6e8bb
PB
6664 }
6665 }
7d1b0095 6666 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6667 if (op < 8) {
6668 /* Accumulate. */
dd8fbd78 6669 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6670 switch (op) {
6671 case 0:
dd8fbd78 6672 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6673 break;
6674 case 1:
aa47cfdd
PM
6675 {
6676 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6677 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6678 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6679 break;
aa47cfdd 6680 }
9ee6e8bb 6681 case 4:
dd8fbd78 6682 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6683 break;
6684 case 5:
aa47cfdd
PM
6685 {
6686 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6687 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6688 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6689 break;
aa47cfdd 6690 }
9ee6e8bb
PB
6691 default:
6692 abort();
6693 }
7d1b0095 6694 tcg_temp_free_i32(tmp2);
9ee6e8bb 6695 }
dd8fbd78 6696 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6697 }
6698 break;
9ee6e8bb 6699 case 3: /* VQDMLAL scalar */
9ee6e8bb 6700 case 7: /* VQDMLSL scalar */
9ee6e8bb 6701 case 11: /* VQDMULL scalar */
3e3326df 6702 if (u == 1) {
ad69471c 6703 return 1;
3e3326df
PM
6704 }
6705 /* fall through */
6706 case 2: /* VMLAL scalar */
6707 case 6: /* VMLSL scalar */
6708 case 10: /* VMULL scalar */
6709 if (rd & 1) {
6710 return 1;
6711 }
dd8fbd78 6712 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6713 /* We need a copy of tmp2 because gen_neon_mull
6714 * deletes it during pass 0. */
7d1b0095 6715 tmp4 = tcg_temp_new_i32();
c6067f04 6716 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6717 tmp3 = neon_load_reg(rn, 1);
ad69471c 6718
9ee6e8bb 6719 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6720 if (pass == 0) {
6721 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6722 } else {
dd8fbd78 6723 tmp = tmp3;
c6067f04 6724 tmp2 = tmp4;
9ee6e8bb 6725 }
ad69471c 6726 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6727 if (op != 11) {
6728 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6729 }
9ee6e8bb 6730 switch (op) {
4dc064e6
PM
6731 case 6:
6732 gen_neon_negl(cpu_V0, size);
6733 /* Fall through */
6734 case 2:
ad69471c 6735 gen_neon_addl(size);
9ee6e8bb
PB
6736 break;
6737 case 3: case 7:
ad69471c 6738 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6739 if (op == 7) {
6740 gen_neon_negl(cpu_V0, size);
6741 }
ad69471c 6742 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6743 break;
6744 case 10:
6745 /* no-op */
6746 break;
6747 case 11:
ad69471c 6748 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6749 break;
6750 default:
6751 abort();
6752 }
ad69471c 6753 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6754 }
dd8fbd78 6755
dd8fbd78 6756
9ee6e8bb
PB
6757 break;
6758 default: /* 14 and 15 are RESERVED */
6759 return 1;
6760 }
6761 }
6762 } else { /* size == 3 */
6763 if (!u) {
6764 /* Extract. */
9ee6e8bb 6765 imm = (insn >> 8) & 0xf;
ad69471c
PB
6766
6767 if (imm > 7 && !q)
6768 return 1;
6769
52579ea1
PM
6770 if (q && ((rd | rn | rm) & 1)) {
6771 return 1;
6772 }
6773
ad69471c
PB
6774 if (imm == 0) {
6775 neon_load_reg64(cpu_V0, rn);
6776 if (q) {
6777 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6778 }
ad69471c
PB
6779 } else if (imm == 8) {
6780 neon_load_reg64(cpu_V0, rn + 1);
6781 if (q) {
6782 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6783 }
ad69471c 6784 } else if (q) {
a7812ae4 6785 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6786 if (imm < 8) {
6787 neon_load_reg64(cpu_V0, rn);
a7812ae4 6788 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6789 } else {
6790 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6791 neon_load_reg64(tmp64, rm);
ad69471c
PB
6792 }
6793 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6794 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6795 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6796 if (imm < 8) {
6797 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6798 } else {
ad69471c
PB
6799 neon_load_reg64(cpu_V1, rm + 1);
6800 imm -= 8;
9ee6e8bb 6801 }
ad69471c 6802 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6803 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6804 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6805 tcg_temp_free_i64(tmp64);
ad69471c 6806 } else {
a7812ae4 6807 /* BUGFIX */
ad69471c 6808 neon_load_reg64(cpu_V0, rn);
a7812ae4 6809 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6810 neon_load_reg64(cpu_V1, rm);
a7812ae4 6811 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6812 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6813 }
6814 neon_store_reg64(cpu_V0, rd);
6815 if (q) {
6816 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6817 }
6818 } else if ((insn & (1 << 11)) == 0) {
6819 /* Two register misc. */
6820 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6821 size = (insn >> 18) & 3;
600b828c
PM
6822 /* UNDEF for unknown op values and bad op-size combinations */
6823 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6824 return 1;
6825 }
fe8fcf3d
PM
6826 if (neon_2rm_is_v8_op(op) &&
6827 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6828 return 1;
6829 }
fc2a9b37
PM
6830 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6831 q && ((rm | rd) & 1)) {
6832 return 1;
6833 }
9ee6e8bb 6834 switch (op) {
600b828c 6835 case NEON_2RM_VREV64:
9ee6e8bb 6836 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6837 tmp = neon_load_reg(rm, pass * 2);
6838 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6839 switch (size) {
dd8fbd78
FN
6840 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6841 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6842 case 2: /* no-op */ break;
6843 default: abort();
6844 }
dd8fbd78 6845 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6846 if (size == 2) {
dd8fbd78 6847 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6848 } else {
9ee6e8bb 6849 switch (size) {
dd8fbd78
FN
6850 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6851 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6852 default: abort();
6853 }
dd8fbd78 6854 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6855 }
6856 }
6857 break;
600b828c
PM
6858 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6859 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6860 for (pass = 0; pass < q + 1; pass++) {
6861 tmp = neon_load_reg(rm, pass * 2);
6862 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6863 tmp = neon_load_reg(rm, pass * 2 + 1);
6864 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6865 switch (size) {
6866 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6867 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6868 case 2: tcg_gen_add_i64(CPU_V001); break;
6869 default: abort();
6870 }
600b828c 6871 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6872 /* Accumulate. */
ad69471c
PB
6873 neon_load_reg64(cpu_V1, rd + pass);
6874 gen_neon_addl(size);
9ee6e8bb 6875 }
ad69471c 6876 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6877 }
6878 break;
600b828c 6879 case NEON_2RM_VTRN:
9ee6e8bb 6880 if (size == 2) {
a5a14945 6881 int n;
9ee6e8bb 6882 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6883 tmp = neon_load_reg(rm, n);
6884 tmp2 = neon_load_reg(rd, n + 1);
6885 neon_store_reg(rm, n, tmp2);
6886 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6887 }
6888 } else {
6889 goto elementwise;
6890 }
6891 break;
600b828c 6892 case NEON_2RM_VUZP:
02acedf9 6893 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6894 return 1;
9ee6e8bb
PB
6895 }
6896 break;
600b828c 6897 case NEON_2RM_VZIP:
d68a6f3a 6898 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6899 return 1;
9ee6e8bb
PB
6900 }
6901 break;
600b828c
PM
6902 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6903 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6904 if (rm & 1) {
6905 return 1;
6906 }
39d5492a 6907 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6908 for (pass = 0; pass < 2; pass++) {
ad69471c 6909 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6910 tmp = tcg_temp_new_i32();
600b828c
PM
6911 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6912 tmp, cpu_V0);
ad69471c
PB
6913 if (pass == 0) {
6914 tmp2 = tmp;
6915 } else {
6916 neon_store_reg(rd, 0, tmp2);
6917 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6918 }
9ee6e8bb
PB
6919 }
6920 break;
600b828c 6921 case NEON_2RM_VSHLL:
fc2a9b37 6922 if (q || (rd & 1)) {
9ee6e8bb 6923 return 1;
600b828c 6924 }
ad69471c
PB
6925 tmp = neon_load_reg(rm, 0);
6926 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6927 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6928 if (pass == 1)
6929 tmp = tmp2;
6930 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6931 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6932 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6933 }
6934 break;
600b828c 6935 case NEON_2RM_VCVT_F16_F32:
d614a513 6936 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6937 q || (rm & 1)) {
6938 return 1;
6939 }
7d1b0095
PM
6940 tmp = tcg_temp_new_i32();
6941 tmp2 = tcg_temp_new_i32();
60011498 6942 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6943 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6944 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6945 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6946 tcg_gen_shli_i32(tmp2, tmp2, 16);
6947 tcg_gen_or_i32(tmp2, tmp2, tmp);
6948 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6949 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6950 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6951 neon_store_reg(rd, 0, tmp2);
7d1b0095 6952 tmp2 = tcg_temp_new_i32();
2d981da7 6953 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6954 tcg_gen_shli_i32(tmp2, tmp2, 16);
6955 tcg_gen_or_i32(tmp2, tmp2, tmp);
6956 neon_store_reg(rd, 1, tmp2);
7d1b0095 6957 tcg_temp_free_i32(tmp);
60011498 6958 break;
600b828c 6959 case NEON_2RM_VCVT_F32_F16:
d614a513 6960 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6961 q || (rd & 1)) {
6962 return 1;
6963 }
7d1b0095 6964 tmp3 = tcg_temp_new_i32();
60011498
PB
6965 tmp = neon_load_reg(rm, 0);
6966 tmp2 = neon_load_reg(rm, 1);
6967 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6968 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6969 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6970 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6971 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6972 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6973 tcg_temp_free_i32(tmp);
60011498 6974 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6975 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6976 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6977 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6978 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6979 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6980 tcg_temp_free_i32(tmp2);
6981 tcg_temp_free_i32(tmp3);
60011498 6982 break;
9d935509 6983 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6984 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6985 || ((rm | rd) & 1)) {
6986 return 1;
6987 }
6988 tmp = tcg_const_i32(rd);
6989 tmp2 = tcg_const_i32(rm);
6990
6991 /* Bit 6 is the lowest opcode bit; it distinguishes between
6992 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6993 */
6994 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6995
6996 if (op == NEON_2RM_AESE) {
6997 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6998 } else {
6999 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7000 }
7001 tcg_temp_free_i32(tmp);
7002 tcg_temp_free_i32(tmp2);
7003 tcg_temp_free_i32(tmp3);
7004 break;
f1ecb913 7005 case NEON_2RM_SHA1H:
d614a513 7006 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7007 || ((rm | rd) & 1)) {
7008 return 1;
7009 }
7010 tmp = tcg_const_i32(rd);
7011 tmp2 = tcg_const_i32(rm);
7012
7013 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7014
7015 tcg_temp_free_i32(tmp);
7016 tcg_temp_free_i32(tmp2);
7017 break;
7018 case NEON_2RM_SHA1SU1:
7019 if ((rm | rd) & 1) {
7020 return 1;
7021 }
7022 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7023 if (q) {
d614a513 7024 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7025 return 1;
7026 }
d614a513 7027 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7028 return 1;
7029 }
7030 tmp = tcg_const_i32(rd);
7031 tmp2 = tcg_const_i32(rm);
7032 if (q) {
7033 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7034 } else {
7035 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7036 }
7037 tcg_temp_free_i32(tmp);
7038 tcg_temp_free_i32(tmp2);
7039 break;
9ee6e8bb
PB
7040 default:
7041 elementwise:
7042 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7043 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7044 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7045 neon_reg_offset(rm, pass));
39d5492a 7046 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7047 } else {
dd8fbd78 7048 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7049 }
7050 switch (op) {
600b828c 7051 case NEON_2RM_VREV32:
9ee6e8bb 7052 switch (size) {
dd8fbd78
FN
7053 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7054 case 1: gen_swap_half(tmp); break;
600b828c 7055 default: abort();
9ee6e8bb
PB
7056 }
7057 break;
600b828c 7058 case NEON_2RM_VREV16:
dd8fbd78 7059 gen_rev16(tmp);
9ee6e8bb 7060 break;
600b828c 7061 case NEON_2RM_VCLS:
9ee6e8bb 7062 switch (size) {
dd8fbd78
FN
7063 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7064 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7065 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7066 default: abort();
9ee6e8bb
PB
7067 }
7068 break;
600b828c 7069 case NEON_2RM_VCLZ:
9ee6e8bb 7070 switch (size) {
dd8fbd78
FN
7071 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7072 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7073 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 7074 default: abort();
9ee6e8bb
PB
7075 }
7076 break;
600b828c 7077 case NEON_2RM_VCNT:
dd8fbd78 7078 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7079 break;
600b828c 7080 case NEON_2RM_VMVN:
dd8fbd78 7081 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7082 break;
600b828c 7083 case NEON_2RM_VQABS:
9ee6e8bb 7084 switch (size) {
02da0b2d
PM
7085 case 0:
7086 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7087 break;
7088 case 1:
7089 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7090 break;
7091 case 2:
7092 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7093 break;
600b828c 7094 default: abort();
9ee6e8bb
PB
7095 }
7096 break;
600b828c 7097 case NEON_2RM_VQNEG:
9ee6e8bb 7098 switch (size) {
02da0b2d
PM
7099 case 0:
7100 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7101 break;
7102 case 1:
7103 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7104 break;
7105 case 2:
7106 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7107 break;
600b828c 7108 default: abort();
9ee6e8bb
PB
7109 }
7110 break;
600b828c 7111 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7112 tmp2 = tcg_const_i32(0);
9ee6e8bb 7113 switch(size) {
dd8fbd78
FN
7114 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7115 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7116 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7117 default: abort();
9ee6e8bb 7118 }
39d5492a 7119 tcg_temp_free_i32(tmp2);
600b828c 7120 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7121 tcg_gen_not_i32(tmp, tmp);
600b828c 7122 }
9ee6e8bb 7123 break;
600b828c 7124 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7125 tmp2 = tcg_const_i32(0);
9ee6e8bb 7126 switch(size) {
dd8fbd78
FN
7127 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7128 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7129 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7130 default: abort();
9ee6e8bb 7131 }
39d5492a 7132 tcg_temp_free_i32(tmp2);
600b828c 7133 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7134 tcg_gen_not_i32(tmp, tmp);
600b828c 7135 }
9ee6e8bb 7136 break;
600b828c 7137 case NEON_2RM_VCEQ0:
dd8fbd78 7138 tmp2 = tcg_const_i32(0);
9ee6e8bb 7139 switch(size) {
dd8fbd78
FN
7140 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7141 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7142 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7143 default: abort();
9ee6e8bb 7144 }
39d5492a 7145 tcg_temp_free_i32(tmp2);
9ee6e8bb 7146 break;
600b828c 7147 case NEON_2RM_VABS:
9ee6e8bb 7148 switch(size) {
dd8fbd78
FN
7149 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7150 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7151 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7152 default: abort();
9ee6e8bb
PB
7153 }
7154 break;
600b828c 7155 case NEON_2RM_VNEG:
dd8fbd78
FN
7156 tmp2 = tcg_const_i32(0);
7157 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7158 tcg_temp_free_i32(tmp2);
9ee6e8bb 7159 break;
600b828c 7160 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7161 {
7162 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7163 tmp2 = tcg_const_i32(0);
aa47cfdd 7164 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7165 tcg_temp_free_i32(tmp2);
aa47cfdd 7166 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7167 break;
aa47cfdd 7168 }
600b828c 7169 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7170 {
7171 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7172 tmp2 = tcg_const_i32(0);
aa47cfdd 7173 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7174 tcg_temp_free_i32(tmp2);
aa47cfdd 7175 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7176 break;
aa47cfdd 7177 }
600b828c 7178 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7179 {
7180 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7181 tmp2 = tcg_const_i32(0);
aa47cfdd 7182 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7183 tcg_temp_free_i32(tmp2);
aa47cfdd 7184 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7185 break;
aa47cfdd 7186 }
600b828c 7187 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7188 {
7189 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7190 tmp2 = tcg_const_i32(0);
aa47cfdd 7191 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7192 tcg_temp_free_i32(tmp2);
aa47cfdd 7193 tcg_temp_free_ptr(fpstatus);
0e326109 7194 break;
aa47cfdd 7195 }
600b828c 7196 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7197 {
7198 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7199 tmp2 = tcg_const_i32(0);
aa47cfdd 7200 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7201 tcg_temp_free_i32(tmp2);
aa47cfdd 7202 tcg_temp_free_ptr(fpstatus);
0e326109 7203 break;
aa47cfdd 7204 }
600b828c 7205 case NEON_2RM_VABS_F:
4373f3ce 7206 gen_vfp_abs(0);
9ee6e8bb 7207 break;
600b828c 7208 case NEON_2RM_VNEG_F:
4373f3ce 7209 gen_vfp_neg(0);
9ee6e8bb 7210 break;
600b828c 7211 case NEON_2RM_VSWP:
dd8fbd78
FN
7212 tmp2 = neon_load_reg(rd, pass);
7213 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7214 break;
600b828c 7215 case NEON_2RM_VTRN:
dd8fbd78 7216 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7217 switch (size) {
dd8fbd78
FN
7218 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7219 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7220 default: abort();
9ee6e8bb 7221 }
dd8fbd78 7222 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7223 break;
34f7b0a2
WN
7224 case NEON_2RM_VRINTN:
7225 case NEON_2RM_VRINTA:
7226 case NEON_2RM_VRINTM:
7227 case NEON_2RM_VRINTP:
7228 case NEON_2RM_VRINTZ:
7229 {
7230 TCGv_i32 tcg_rmode;
7231 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7232 int rmode;
7233
7234 if (op == NEON_2RM_VRINTZ) {
7235 rmode = FPROUNDING_ZERO;
7236 } else {
7237 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7238 }
7239
7240 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7241 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7242 cpu_env);
7243 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7244 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7245 cpu_env);
7246 tcg_temp_free_ptr(fpstatus);
7247 tcg_temp_free_i32(tcg_rmode);
7248 break;
7249 }
2ce70625
WN
7250 case NEON_2RM_VRINTX:
7251 {
7252 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7253 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7254 tcg_temp_free_ptr(fpstatus);
7255 break;
7256 }
901ad525
WN
7257 case NEON_2RM_VCVTAU:
7258 case NEON_2RM_VCVTAS:
7259 case NEON_2RM_VCVTNU:
7260 case NEON_2RM_VCVTNS:
7261 case NEON_2RM_VCVTPU:
7262 case NEON_2RM_VCVTPS:
7263 case NEON_2RM_VCVTMU:
7264 case NEON_2RM_VCVTMS:
7265 {
7266 bool is_signed = !extract32(insn, 7, 1);
7267 TCGv_ptr fpst = get_fpstatus_ptr(1);
7268 TCGv_i32 tcg_rmode, tcg_shift;
7269 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7270
7271 tcg_shift = tcg_const_i32(0);
7272 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7273 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7274 cpu_env);
7275
7276 if (is_signed) {
7277 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7278 tcg_shift, fpst);
7279 } else {
7280 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7281 tcg_shift, fpst);
7282 }
7283
7284 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7285 cpu_env);
7286 tcg_temp_free_i32(tcg_rmode);
7287 tcg_temp_free_i32(tcg_shift);
7288 tcg_temp_free_ptr(fpst);
7289 break;
7290 }
600b828c 7291 case NEON_2RM_VRECPE:
b6d4443a
AB
7292 {
7293 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7294 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7295 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7296 break;
b6d4443a 7297 }
600b828c 7298 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7299 {
7300 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7301 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7302 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7303 break;
c2fb418e 7304 }
600b828c 7305 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7306 {
7307 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7308 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7309 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7310 break;
b6d4443a 7311 }
600b828c 7312 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7313 {
7314 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7315 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7316 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7317 break;
c2fb418e 7318 }
600b828c 7319 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7320 gen_vfp_sito(0, 1);
9ee6e8bb 7321 break;
600b828c 7322 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7323 gen_vfp_uito(0, 1);
9ee6e8bb 7324 break;
600b828c 7325 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7326 gen_vfp_tosiz(0, 1);
9ee6e8bb 7327 break;
600b828c 7328 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7329 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7330 break;
7331 default:
600b828c
PM
7332 /* Reserved op values were caught by the
7333 * neon_2rm_sizes[] check earlier.
7334 */
7335 abort();
9ee6e8bb 7336 }
600b828c 7337 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7338 tcg_gen_st_f32(cpu_F0s, cpu_env,
7339 neon_reg_offset(rd, pass));
9ee6e8bb 7340 } else {
dd8fbd78 7341 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7342 }
7343 }
7344 break;
7345 }
7346 } else if ((insn & (1 << 10)) == 0) {
7347 /* VTBL, VTBX. */
56907d77
PM
7348 int n = ((insn >> 8) & 3) + 1;
7349 if ((rn + n) > 32) {
7350 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7351 * helper function running off the end of the register file.
7352 */
7353 return 1;
7354 }
7355 n <<= 3;
9ee6e8bb 7356 if (insn & (1 << 6)) {
8f8e3aa4 7357 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7358 } else {
7d1b0095 7359 tmp = tcg_temp_new_i32();
8f8e3aa4 7360 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7361 }
8f8e3aa4 7362 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7363 tmp4 = tcg_const_i32(rn);
7364 tmp5 = tcg_const_i32(n);
9ef39277 7365 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7366 tcg_temp_free_i32(tmp);
9ee6e8bb 7367 if (insn & (1 << 6)) {
8f8e3aa4 7368 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7369 } else {
7d1b0095 7370 tmp = tcg_temp_new_i32();
8f8e3aa4 7371 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7372 }
8f8e3aa4 7373 tmp3 = neon_load_reg(rm, 1);
9ef39277 7374 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7375 tcg_temp_free_i32(tmp5);
7376 tcg_temp_free_i32(tmp4);
8f8e3aa4 7377 neon_store_reg(rd, 0, tmp2);
3018f259 7378 neon_store_reg(rd, 1, tmp3);
7d1b0095 7379 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7380 } else if ((insn & 0x380) == 0) {
7381 /* VDUP */
133da6aa
JR
7382 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7383 return 1;
7384 }
9ee6e8bb 7385 if (insn & (1 << 19)) {
dd8fbd78 7386 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7387 } else {
dd8fbd78 7388 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7389 }
7390 if (insn & (1 << 16)) {
dd8fbd78 7391 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7392 } else if (insn & (1 << 17)) {
7393 if ((insn >> 18) & 1)
dd8fbd78 7394 gen_neon_dup_high16(tmp);
9ee6e8bb 7395 else
dd8fbd78 7396 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7397 }
7398 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7399 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7400 tcg_gen_mov_i32(tmp2, tmp);
7401 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7402 }
7d1b0095 7403 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7404 } else {
7405 return 1;
7406 }
7407 }
7408 }
7409 return 0;
7410}
7411
7dcc1f89 7412static int disas_coproc_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 7413{
4b6a83fb
PM
7414 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
7415 const ARMCPRegInfo *ri;
9ee6e8bb
PB
7416
7417 cpnum = (insn >> 8) & 0xf;
c0f4af17
PM
7418
7419 /* First check for coprocessor space used for XScale/iwMMXt insns */
d614a513 7420 if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
c0f4af17
PM
7421 if (extract32(s->c15_cpar, cpnum, 1) == 0) {
7422 return 1;
7423 }
d614a513 7424 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
7dcc1f89 7425 return disas_iwmmxt_insn(s, insn);
d614a513 7426 } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
7dcc1f89 7427 return disas_dsp_insn(s, insn);
c0f4af17
PM
7428 }
7429 return 1;
4b6a83fb
PM
7430 }
7431
7432 /* Otherwise treat as a generic register access */
7433 is64 = (insn & (1 << 25)) == 0;
7434 if (!is64 && ((insn & (1 << 4)) == 0)) {
7435 /* cdp */
7436 return 1;
7437 }
7438
7439 crm = insn & 0xf;
7440 if (is64) {
7441 crn = 0;
7442 opc1 = (insn >> 4) & 0xf;
7443 opc2 = 0;
7444 rt2 = (insn >> 16) & 0xf;
7445 } else {
7446 crn = (insn >> 16) & 0xf;
7447 opc1 = (insn >> 21) & 7;
7448 opc2 = (insn >> 5) & 7;
7449 rt2 = 0;
7450 }
7451 isread = (insn >> 20) & 1;
7452 rt = (insn >> 12) & 0xf;
7453
60322b39 7454 ri = get_arm_cp_reginfo(s->cp_regs,
51a79b03 7455 ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
4b6a83fb
PM
7456 if (ri) {
7457 /* Check access permissions */
dcbff19b 7458 if (!cp_access_ok(s->current_el, ri, isread)) {
4b6a83fb
PM
7459 return 1;
7460 }
7461
c0f4af17 7462 if (ri->accessfn ||
d614a513 7463 (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
f59df3f2
PM
7464 /* Emit code to perform further access permissions checks at
7465 * runtime; this may result in an exception.
c0f4af17
PM
7466 * Note that on XScale all cp0..c13 registers do an access check
7467 * call in order to handle c15_cpar.
f59df3f2
PM
7468 */
7469 TCGv_ptr tmpptr;
3f208fd7 7470 TCGv_i32 tcg_syn, tcg_isread;
8bcbf37c
PM
7471 uint32_t syndrome;
7472
7473 /* Note that since we are an implementation which takes an
7474 * exception on a trapped conditional instruction only if the
7475 * instruction passes its condition code check, we can take
7476 * advantage of the clause in the ARM ARM that allows us to set
7477 * the COND field in the instruction to 0xE in all cases.
7478 * We could fish the actual condition out of the insn (ARM)
7479 * or the condexec bits (Thumb) but it isn't necessary.
7480 */
7481 switch (cpnum) {
7482 case 14:
7483 if (is64) {
7484 syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7485 isread, false);
8bcbf37c
PM
7486 } else {
7487 syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7488 rt, isread, false);
8bcbf37c
PM
7489 }
7490 break;
7491 case 15:
7492 if (is64) {
7493 syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
4df32259 7494 isread, false);
8bcbf37c
PM
7495 } else {
7496 syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
4df32259 7497 rt, isread, false);
8bcbf37c
PM
7498 }
7499 break;
7500 default:
7501 /* ARMv8 defines that only coprocessors 14 and 15 exist,
7502 * so this can only happen if this is an ARMv7 or earlier CPU,
7503 * in which case the syndrome information won't actually be
7504 * guest visible.
7505 */
d614a513 7506 assert(!arm_dc_feature(s, ARM_FEATURE_V8));
8bcbf37c
PM
7507 syndrome = syn_uncategorized();
7508 break;
7509 }
7510
43bfa4a1 7511 gen_set_condexec(s);
3977ee5d 7512 gen_set_pc_im(s, s->pc - 4);
f59df3f2 7513 tmpptr = tcg_const_ptr(ri);
8bcbf37c 7514 tcg_syn = tcg_const_i32(syndrome);
3f208fd7
PM
7515 tcg_isread = tcg_const_i32(isread);
7516 gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
7517 tcg_isread);
f59df3f2 7518 tcg_temp_free_ptr(tmpptr);
8bcbf37c 7519 tcg_temp_free_i32(tcg_syn);
3f208fd7 7520 tcg_temp_free_i32(tcg_isread);
f59df3f2
PM
7521 }
7522
4b6a83fb
PM
7523 /* Handle special cases first */
7524 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
7525 case ARM_CP_NOP:
7526 return 0;
7527 case ARM_CP_WFI:
7528 if (isread) {
7529 return 1;
7530 }
eaed129d 7531 gen_set_pc_im(s, s->pc);
4b6a83fb 7532 s->is_jmp = DISAS_WFI;
2bee5105 7533 return 0;
4b6a83fb
PM
7534 default:
7535 break;
7536 }
7537
bd79255d 7538 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7539 gen_io_start();
7540 }
7541
4b6a83fb
PM
7542 if (isread) {
7543 /* Read */
7544 if (is64) {
7545 TCGv_i64 tmp64;
7546 TCGv_i32 tmp;
7547 if (ri->type & ARM_CP_CONST) {
7548 tmp64 = tcg_const_i64(ri->resetvalue);
7549 } else if (ri->readfn) {
7550 TCGv_ptr tmpptr;
4b6a83fb
PM
7551 tmp64 = tcg_temp_new_i64();
7552 tmpptr = tcg_const_ptr(ri);
7553 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
7554 tcg_temp_free_ptr(tmpptr);
7555 } else {
7556 tmp64 = tcg_temp_new_i64();
7557 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
7558 }
7559 tmp = tcg_temp_new_i32();
ecc7b3aa 7560 tcg_gen_extrl_i64_i32(tmp, tmp64);
4b6a83fb
PM
7561 store_reg(s, rt, tmp);
7562 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 7563 tmp = tcg_temp_new_i32();
ecc7b3aa 7564 tcg_gen_extrl_i64_i32(tmp, tmp64);
ed336850 7565 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
7566 store_reg(s, rt2, tmp);
7567 } else {
39d5492a 7568 TCGv_i32 tmp;
4b6a83fb
PM
7569 if (ri->type & ARM_CP_CONST) {
7570 tmp = tcg_const_i32(ri->resetvalue);
7571 } else if (ri->readfn) {
7572 TCGv_ptr tmpptr;
4b6a83fb
PM
7573 tmp = tcg_temp_new_i32();
7574 tmpptr = tcg_const_ptr(ri);
7575 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
7576 tcg_temp_free_ptr(tmpptr);
7577 } else {
7578 tmp = load_cpu_offset(ri->fieldoffset);
7579 }
7580 if (rt == 15) {
7581 /* Destination register of r15 for 32 bit loads sets
7582 * the condition codes from the high 4 bits of the value
7583 */
7584 gen_set_nzcv(tmp);
7585 tcg_temp_free_i32(tmp);
7586 } else {
7587 store_reg(s, rt, tmp);
7588 }
7589 }
7590 } else {
7591 /* Write */
7592 if (ri->type & ARM_CP_CONST) {
7593 /* If not forbidden by access permissions, treat as WI */
7594 return 0;
7595 }
7596
7597 if (is64) {
39d5492a 7598 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
7599 TCGv_i64 tmp64 = tcg_temp_new_i64();
7600 tmplo = load_reg(s, rt);
7601 tmphi = load_reg(s, rt2);
7602 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
7603 tcg_temp_free_i32(tmplo);
7604 tcg_temp_free_i32(tmphi);
7605 if (ri->writefn) {
7606 TCGv_ptr tmpptr = tcg_const_ptr(ri);
4b6a83fb
PM
7607 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
7608 tcg_temp_free_ptr(tmpptr);
7609 } else {
7610 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
7611 }
7612 tcg_temp_free_i64(tmp64);
7613 } else {
7614 if (ri->writefn) {
39d5492a 7615 TCGv_i32 tmp;
4b6a83fb 7616 TCGv_ptr tmpptr;
4b6a83fb
PM
7617 tmp = load_reg(s, rt);
7618 tmpptr = tcg_const_ptr(ri);
7619 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
7620 tcg_temp_free_ptr(tmpptr);
7621 tcg_temp_free_i32(tmp);
7622 } else {
39d5492a 7623 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
7624 store_cpu_offset(tmp, ri->fieldoffset);
7625 }
7626 }
2452731c
PM
7627 }
7628
bd79255d 7629 if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
2452731c
PM
7630 /* I/O operations must end the TB here (whether read or write) */
7631 gen_io_end();
7632 gen_lookup_tb(s);
7633 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
7634 /* We default to ending the TB on a coprocessor register write,
7635 * but allow this to be suppressed by the register definition
7636 * (usually only necessary to work around guest bugs).
7637 */
2452731c 7638 gen_lookup_tb(s);
4b6a83fb 7639 }
2452731c 7640
4b6a83fb
PM
7641 return 0;
7642 }
7643
626187d8
PM
7644 /* Unknown register; this might be a guest error or a QEMU
7645 * unimplemented feature.
7646 */
7647 if (is64) {
7648 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7649 "64 bit system register cp:%d opc1: %d crm:%d "
7650 "(%s)\n",
7651 isread ? "read" : "write", cpnum, opc1, crm,
7652 s->ns ? "non-secure" : "secure");
626187d8
PM
7653 } else {
7654 qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
51a79b03
PM
7655 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
7656 "(%s)\n",
7657 isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
7658 s->ns ? "non-secure" : "secure");
626187d8
PM
7659 }
7660
4a9a539f 7661 return 1;
9ee6e8bb
PB
7662}
7663
5e3f878a
PB
7664
7665/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7666static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7667{
39d5492a 7668 TCGv_i32 tmp;
7d1b0095 7669 tmp = tcg_temp_new_i32();
ecc7b3aa 7670 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7671 store_reg(s, rlow, tmp);
7d1b0095 7672 tmp = tcg_temp_new_i32();
5e3f878a 7673 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7674 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7675 store_reg(s, rhigh, tmp);
7676}
7677
7678/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7679static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7680{
a7812ae4 7681 TCGv_i64 tmp;
39d5492a 7682 TCGv_i32 tmp2;
5e3f878a 7683
36aa55dc 7684 /* Load value and extend to 64 bits. */
a7812ae4 7685 tmp = tcg_temp_new_i64();
5e3f878a
PB
7686 tmp2 = load_reg(s, rlow);
7687 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7688 tcg_temp_free_i32(tmp2);
5e3f878a 7689 tcg_gen_add_i64(val, val, tmp);
b75263d6 7690 tcg_temp_free_i64(tmp);
5e3f878a
PB
7691}
7692
7693/* load and add a 64-bit value from a register pair. */
a7812ae4 7694static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7695{
a7812ae4 7696 TCGv_i64 tmp;
39d5492a
PM
7697 TCGv_i32 tmpl;
7698 TCGv_i32 tmph;
5e3f878a
PB
7699
7700 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7701 tmpl = load_reg(s, rlow);
7702 tmph = load_reg(s, rhigh);
a7812ae4 7703 tmp = tcg_temp_new_i64();
36aa55dc 7704 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7705 tcg_temp_free_i32(tmpl);
7706 tcg_temp_free_i32(tmph);
5e3f878a 7707 tcg_gen_add_i64(val, val, tmp);
b75263d6 7708 tcg_temp_free_i64(tmp);
5e3f878a
PB
7709}
7710
c9f10124 7711/* Set N and Z flags from hi|lo. */
39d5492a 7712static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7713{
c9f10124
RH
7714 tcg_gen_mov_i32(cpu_NF, hi);
7715 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7716}
7717
426f5abc
PB
7718/* Load/Store exclusive instructions are implemented by remembering
7719 the value/address loaded, and seeing if these are the same
b90372ad 7720 when the store is performed. This should be sufficient to implement
426f5abc
PB
7721 the architecturally mandated semantics, and avoids having to monitor
7722 regular stores.
7723
7724 In system emulation mode only one CPU will be running at once, so
7725 this sequence is effectively atomic. In user emulation mode we
7726 throw an exception and handle the atomic operation elsewhere. */
7727static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 7728 TCGv_i32 addr, int size)
426f5abc 7729{
94ee24e7 7730 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc 7731
50225ad0
PM
7732 s->is_ldex = true;
7733
426f5abc
PB
7734 switch (size) {
7735 case 0:
12dcc321 7736 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7737 break;
7738 case 1:
12dcc321 7739 gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7740 break;
7741 case 2:
7742 case 3:
12dcc321 7743 gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7744 break;
7745 default:
7746 abort();
7747 }
03d05e2d 7748
426f5abc 7749 if (size == 3) {
39d5492a 7750 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d
PM
7751 TCGv_i32 tmp3 = tcg_temp_new_i32();
7752
2c9adbda 7753 tcg_gen_addi_i32(tmp2, addr, 4);
12dcc321 7754 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7d1b0095 7755 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7756 tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
7757 store_reg(s, rt2, tmp3);
7758 } else {
7759 tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
426f5abc 7760 }
03d05e2d
PM
7761
7762 store_reg(s, rt, tmp);
7763 tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
426f5abc
PB
7764}
7765
7766static void gen_clrex(DisasContext *s)
7767{
03d05e2d 7768 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7769}
7770
7771#ifdef CONFIG_USER_ONLY
7772static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7773 TCGv_i32 addr, int size)
426f5abc 7774{
03d05e2d 7775 tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
426f5abc
PB
7776 tcg_gen_movi_i32(cpu_exclusive_info,
7777 size | (rd << 4) | (rt << 8) | (rt2 << 12));
d4a2dc67 7778 gen_exception_internal_insn(s, 4, EXCP_STREX);
426f5abc
PB
7779}
7780#else
7781static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 7782 TCGv_i32 addr, int size)
426f5abc 7783{
39d5492a 7784 TCGv_i32 tmp;
03d05e2d 7785 TCGv_i64 val64, extaddr;
42a268c2
RH
7786 TCGLabel *done_label;
7787 TCGLabel *fail_label;
426f5abc
PB
7788
7789 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
7790 [addr] = {Rt};
7791 {Rd} = 0;
7792 } else {
7793 {Rd} = 1;
7794 } */
7795 fail_label = gen_new_label();
7796 done_label = gen_new_label();
03d05e2d
PM
7797 extaddr = tcg_temp_new_i64();
7798 tcg_gen_extu_i32_i64(extaddr, addr);
7799 tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
7800 tcg_temp_free_i64(extaddr);
7801
94ee24e7 7802 tmp = tcg_temp_new_i32();
426f5abc
PB
7803 switch (size) {
7804 case 0:
12dcc321 7805 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7806 break;
7807 case 1:
12dcc321 7808 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7809 break;
7810 case 2:
7811 case 3:
12dcc321 7812 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7813 break;
7814 default:
7815 abort();
7816 }
03d05e2d
PM
7817
7818 val64 = tcg_temp_new_i64();
426f5abc 7819 if (size == 3) {
39d5492a 7820 TCGv_i32 tmp2 = tcg_temp_new_i32();
03d05e2d 7821 TCGv_i32 tmp3 = tcg_temp_new_i32();
426f5abc 7822 tcg_gen_addi_i32(tmp2, addr, 4);
12dcc321 7823 gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
7d1b0095 7824 tcg_temp_free_i32(tmp2);
03d05e2d
PM
7825 tcg_gen_concat_i32_i64(val64, tmp, tmp3);
7826 tcg_temp_free_i32(tmp3);
7827 } else {
7828 tcg_gen_extu_i32_i64(val64, tmp);
426f5abc 7829 }
03d05e2d
PM
7830 tcg_temp_free_i32(tmp);
7831
7832 tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
7833 tcg_temp_free_i64(val64);
7834
426f5abc
PB
7835 tmp = load_reg(s, rt);
7836 switch (size) {
7837 case 0:
12dcc321 7838 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7839 break;
7840 case 1:
12dcc321 7841 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7842 break;
7843 case 2:
7844 case 3:
12dcc321 7845 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
426f5abc
PB
7846 break;
7847 default:
7848 abort();
7849 }
94ee24e7 7850 tcg_temp_free_i32(tmp);
426f5abc
PB
7851 if (size == 3) {
7852 tcg_gen_addi_i32(addr, addr, 4);
7853 tmp = load_reg(s, rt2);
12dcc321 7854 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
94ee24e7 7855 tcg_temp_free_i32(tmp);
426f5abc
PB
7856 }
7857 tcg_gen_movi_i32(cpu_R[rd], 0);
7858 tcg_gen_br(done_label);
7859 gen_set_label(fail_label);
7860 tcg_gen_movi_i32(cpu_R[rd], 1);
7861 gen_set_label(done_label);
03d05e2d 7862 tcg_gen_movi_i64(cpu_exclusive_addr, -1);
426f5abc
PB
7863}
7864#endif
7865
81465888
PM
7866/* gen_srs:
7867 * @env: CPUARMState
7868 * @s: DisasContext
7869 * @mode: mode field from insn (which stack to store to)
7870 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7871 * @writeback: true if writeback bit set
7872 *
7873 * Generate code for the SRS (Store Return State) insn.
7874 */
7875static void gen_srs(DisasContext *s,
7876 uint32_t mode, uint32_t amode, bool writeback)
7877{
7878 int32_t offset;
cbc0326b
PM
7879 TCGv_i32 addr, tmp;
7880 bool undef = false;
7881
7882 /* SRS is:
7883 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 7884 * and specified mode is monitor mode
cbc0326b
PM
7885 * - UNDEFINED in Hyp mode
7886 * - UNPREDICTABLE in User or System mode
7887 * - UNPREDICTABLE if the specified mode is:
7888 * -- not implemented
7889 * -- not a valid mode number
7890 * -- a mode that's at a higher exception level
7891 * -- Monitor, if we are Non-secure
f01377f5 7892 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 7893 */
ba63cf47 7894 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
7895 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
7896 return;
7897 }
7898
7899 if (s->current_el == 0 || s->current_el == 2) {
7900 undef = true;
7901 }
7902
7903 switch (mode) {
7904 case ARM_CPU_MODE_USR:
7905 case ARM_CPU_MODE_FIQ:
7906 case ARM_CPU_MODE_IRQ:
7907 case ARM_CPU_MODE_SVC:
7908 case ARM_CPU_MODE_ABT:
7909 case ARM_CPU_MODE_UND:
7910 case ARM_CPU_MODE_SYS:
7911 break;
7912 case ARM_CPU_MODE_HYP:
7913 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
7914 undef = true;
7915 }
7916 break;
7917 case ARM_CPU_MODE_MON:
7918 /* No need to check specifically for "are we non-secure" because
7919 * we've already made EL0 UNDEF and handled the trap for S-EL1;
7920 * so if this isn't EL3 then we must be non-secure.
7921 */
7922 if (s->current_el != 3) {
7923 undef = true;
7924 }
7925 break;
7926 default:
7927 undef = true;
7928 }
7929
7930 if (undef) {
7931 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
7932 default_exception_el(s));
7933 return;
7934 }
7935
7936 addr = tcg_temp_new_i32();
7937 tmp = tcg_const_i32(mode);
f01377f5
PM
7938 /* get_r13_banked() will raise an exception if called from System mode */
7939 gen_set_condexec(s);
7940 gen_set_pc_im(s, s->pc - 4);
81465888
PM
7941 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7942 tcg_temp_free_i32(tmp);
7943 switch (amode) {
7944 case 0: /* DA */
7945 offset = -4;
7946 break;
7947 case 1: /* IA */
7948 offset = 0;
7949 break;
7950 case 2: /* DB */
7951 offset = -8;
7952 break;
7953 case 3: /* IB */
7954 offset = 4;
7955 break;
7956 default:
7957 abort();
7958 }
7959 tcg_gen_addi_i32(addr, addr, offset);
7960 tmp = load_reg(s, 14);
12dcc321 7961 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7962 tcg_temp_free_i32(tmp);
81465888
PM
7963 tmp = load_cpu_field(spsr);
7964 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 7965 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 7966 tcg_temp_free_i32(tmp);
81465888
PM
7967 if (writeback) {
7968 switch (amode) {
7969 case 0:
7970 offset = -8;
7971 break;
7972 case 1:
7973 offset = 4;
7974 break;
7975 case 2:
7976 offset = -4;
7977 break;
7978 case 3:
7979 offset = 0;
7980 break;
7981 default:
7982 abort();
7983 }
7984 tcg_gen_addi_i32(addr, addr, offset);
7985 tmp = tcg_const_i32(mode);
7986 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7987 tcg_temp_free_i32(tmp);
7988 }
7989 tcg_temp_free_i32(addr);
f01377f5 7990 s->is_jmp = DISAS_UPDATE;
81465888
PM
7991}
7992
f4df2210 7993static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7994{
f4df2210 7995 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7996 TCGv_i32 tmp;
7997 TCGv_i32 tmp2;
7998 TCGv_i32 tmp3;
7999 TCGv_i32 addr;
a7812ae4 8000 TCGv_i64 tmp64;
9ee6e8bb 8001
9ee6e8bb 8002 /* M variants do not implement ARM mode. */
b53d8923 8003 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 8004 goto illegal_op;
b53d8923 8005 }
9ee6e8bb
PB
8006 cond = insn >> 28;
8007 if (cond == 0xf){
be5e7a76
DES
8008 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8009 * choose to UNDEF. In ARMv5 and above the space is used
8010 * for miscellaneous unconditional instructions.
8011 */
8012 ARCH(5);
8013
9ee6e8bb
PB
8014 /* Unconditional instructions. */
8015 if (((insn >> 25) & 7) == 1) {
8016 /* NEON Data processing. */
d614a513 8017 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8018 goto illegal_op;
d614a513 8019 }
9ee6e8bb 8020
7dcc1f89 8021 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8022 goto illegal_op;
7dcc1f89 8023 }
9ee6e8bb
PB
8024 return;
8025 }
8026 if ((insn & 0x0f100000) == 0x04000000) {
8027 /* NEON load/store. */
d614a513 8028 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8029 goto illegal_op;
d614a513 8030 }
9ee6e8bb 8031
7dcc1f89 8032 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8033 goto illegal_op;
7dcc1f89 8034 }
9ee6e8bb
PB
8035 return;
8036 }
6a57f3eb
WN
8037 if ((insn & 0x0f000e10) == 0x0e000a00) {
8038 /* VFP. */
7dcc1f89 8039 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8040 goto illegal_op;
8041 }
8042 return;
8043 }
3d185e5d
PM
8044 if (((insn & 0x0f30f000) == 0x0510f000) ||
8045 ((insn & 0x0f30f010) == 0x0710f000)) {
8046 if ((insn & (1 << 22)) == 0) {
8047 /* PLDW; v7MP */
d614a513 8048 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8049 goto illegal_op;
8050 }
8051 }
8052 /* Otherwise PLD; v5TE+ */
be5e7a76 8053 ARCH(5TE);
3d185e5d
PM
8054 return;
8055 }
8056 if (((insn & 0x0f70f000) == 0x0450f000) ||
8057 ((insn & 0x0f70f010) == 0x0650f000)) {
8058 ARCH(7);
8059 return; /* PLI; V7 */
8060 }
8061 if (((insn & 0x0f700000) == 0x04100000) ||
8062 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8063 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8064 goto illegal_op;
8065 }
8066 return; /* v7MP: Unallocated memory hint: must NOP */
8067 }
8068
8069 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8070 ARCH(6);
8071 /* setend */
9886ecdf
PB
8072 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8073 gen_helper_setend(cpu_env);
8074 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8075 }
8076 return;
8077 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8078 switch ((insn >> 4) & 0xf) {
8079 case 1: /* clrex */
8080 ARCH(6K);
426f5abc 8081 gen_clrex(s);
9ee6e8bb
PB
8082 return;
8083 case 4: /* dsb */
8084 case 5: /* dmb */
9ee6e8bb
PB
8085 ARCH(7);
8086 /* We don't emulate caches so these are a no-op. */
8087 return;
6df99dec
SS
8088 case 6: /* isb */
8089 /* We need to break the TB after this insn to execute
8090 * self-modifying code correctly and also to take
8091 * any pending interrupts immediately.
8092 */
8093 gen_lookup_tb(s);
8094 return;
9ee6e8bb
PB
8095 default:
8096 goto illegal_op;
8097 }
8098 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8099 /* srs */
81465888
PM
8100 ARCH(6);
8101 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8102 return;
ea825eee 8103 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8104 /* rfe */
c67b6b71 8105 int32_t offset;
9ee6e8bb
PB
8106 if (IS_USER(s))
8107 goto illegal_op;
8108 ARCH(6);
8109 rn = (insn >> 16) & 0xf;
b0109805 8110 addr = load_reg(s, rn);
9ee6e8bb
PB
8111 i = (insn >> 23) & 3;
8112 switch (i) {
b0109805 8113 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8114 case 1: offset = 0; break; /* IA */
8115 case 2: offset = -8; break; /* DB */
b0109805 8116 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8117 default: abort();
8118 }
8119 if (offset)
b0109805
PB
8120 tcg_gen_addi_i32(addr, addr, offset);
8121 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8122 tmp = tcg_temp_new_i32();
12dcc321 8123 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8124 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8125 tmp2 = tcg_temp_new_i32();
12dcc321 8126 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8127 if (insn & (1 << 21)) {
8128 /* Base writeback. */
8129 switch (i) {
b0109805 8130 case 0: offset = -8; break;
c67b6b71
FN
8131 case 1: offset = 4; break;
8132 case 2: offset = -4; break;
b0109805 8133 case 3: offset = 0; break;
9ee6e8bb
PB
8134 default: abort();
8135 }
8136 if (offset)
b0109805
PB
8137 tcg_gen_addi_i32(addr, addr, offset);
8138 store_reg(s, rn, addr);
8139 } else {
7d1b0095 8140 tcg_temp_free_i32(addr);
9ee6e8bb 8141 }
b0109805 8142 gen_rfe(s, tmp, tmp2);
c67b6b71 8143 return;
9ee6e8bb
PB
8144 } else if ((insn & 0x0e000000) == 0x0a000000) {
8145 /* branch link and change to thumb (blx <offset>) */
8146 int32_t offset;
8147
8148 val = (uint32_t)s->pc;
7d1b0095 8149 tmp = tcg_temp_new_i32();
d9ba4830
PB
8150 tcg_gen_movi_i32(tmp, val);
8151 store_reg(s, 14, tmp);
9ee6e8bb
PB
8152 /* Sign-extend the 24-bit offset */
8153 offset = (((int32_t)insn) << 8) >> 8;
8154 /* offset * 4 + bit24 * 2 + (thumb bit) */
8155 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8156 /* pipeline offset */
8157 val += 4;
be5e7a76 8158 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8159 gen_bx_im(s, val);
9ee6e8bb
PB
8160 return;
8161 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8162 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8163 /* iWMMXt register transfer. */
c0f4af17 8164 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8165 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8166 return;
c0f4af17
PM
8167 }
8168 }
9ee6e8bb
PB
8169 }
8170 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8171 /* Coprocessor double register transfer. */
be5e7a76 8172 ARCH(5TE);
9ee6e8bb
PB
8173 } else if ((insn & 0x0f000010) == 0x0e000010) {
8174 /* Additional coprocessor register transfer. */
7997d92f 8175 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8176 uint32_t mask;
8177 uint32_t val;
8178 /* cps (privileged) */
8179 if (IS_USER(s))
8180 return;
8181 mask = val = 0;
8182 if (insn & (1 << 19)) {
8183 if (insn & (1 << 8))
8184 mask |= CPSR_A;
8185 if (insn & (1 << 7))
8186 mask |= CPSR_I;
8187 if (insn & (1 << 6))
8188 mask |= CPSR_F;
8189 if (insn & (1 << 18))
8190 val |= mask;
8191 }
7997d92f 8192 if (insn & (1 << 17)) {
9ee6e8bb
PB
8193 mask |= CPSR_M;
8194 val |= (insn & 0x1f);
8195 }
8196 if (mask) {
2fbac54b 8197 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8198 }
8199 return;
8200 }
8201 goto illegal_op;
8202 }
8203 if (cond != 0xe) {
8204 /* if not always execute, we generate a conditional jump to
8205 next instruction */
8206 s->condlabel = gen_new_label();
39fb730a 8207 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8208 s->condjmp = 1;
8209 }
8210 if ((insn & 0x0f900000) == 0x03000000) {
8211 if ((insn & (1 << 21)) == 0) {
8212 ARCH(6T2);
8213 rd = (insn >> 12) & 0xf;
8214 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8215 if ((insn & (1 << 22)) == 0) {
8216 /* MOVW */
7d1b0095 8217 tmp = tcg_temp_new_i32();
5e3f878a 8218 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8219 } else {
8220 /* MOVT */
5e3f878a 8221 tmp = load_reg(s, rd);
86831435 8222 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8223 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8224 }
5e3f878a 8225 store_reg(s, rd, tmp);
9ee6e8bb
PB
8226 } else {
8227 if (((insn >> 12) & 0xf) != 0xf)
8228 goto illegal_op;
8229 if (((insn >> 16) & 0xf) == 0) {
8230 gen_nop_hint(s, insn & 0xff);
8231 } else {
8232 /* CPSR = immediate */
8233 val = insn & 0xff;
8234 shift = ((insn >> 8) & 0xf) * 2;
8235 if (shift)
8236 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8237 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8238 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8239 i, val)) {
9ee6e8bb 8240 goto illegal_op;
7dcc1f89 8241 }
9ee6e8bb
PB
8242 }
8243 }
8244 } else if ((insn & 0x0f900000) == 0x01000000
8245 && (insn & 0x00000090) != 0x00000090) {
8246 /* miscellaneous instructions */
8247 op1 = (insn >> 21) & 3;
8248 sh = (insn >> 4) & 0xf;
8249 rm = insn & 0xf;
8250 switch (sh) {
8bfd0550
PM
8251 case 0x0: /* MSR, MRS */
8252 if (insn & (1 << 9)) {
8253 /* MSR (banked) and MRS (banked) */
8254 int sysm = extract32(insn, 16, 4) |
8255 (extract32(insn, 8, 1) << 4);
8256 int r = extract32(insn, 22, 1);
8257
8258 if (op1 & 1) {
8259 /* MSR (banked) */
8260 gen_msr_banked(s, r, sysm, rm);
8261 } else {
8262 /* MRS (banked) */
8263 int rd = extract32(insn, 12, 4);
8264
8265 gen_mrs_banked(s, r, sysm, rd);
8266 }
8267 break;
8268 }
8269
8270 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8271 if (op1 & 1) {
8272 /* PSR = reg */
2fbac54b 8273 tmp = load_reg(s, rm);
9ee6e8bb 8274 i = ((op1 & 2) != 0);
7dcc1f89 8275 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8276 goto illegal_op;
8277 } else {
8278 /* reg = PSR */
8279 rd = (insn >> 12) & 0xf;
8280 if (op1 & 2) {
8281 if (IS_USER(s))
8282 goto illegal_op;
d9ba4830 8283 tmp = load_cpu_field(spsr);
9ee6e8bb 8284 } else {
7d1b0095 8285 tmp = tcg_temp_new_i32();
9ef39277 8286 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8287 }
d9ba4830 8288 store_reg(s, rd, tmp);
9ee6e8bb
PB
8289 }
8290 break;
8291 case 0x1:
8292 if (op1 == 1) {
8293 /* branch/exchange thumb (bx). */
be5e7a76 8294 ARCH(4T);
d9ba4830
PB
8295 tmp = load_reg(s, rm);
8296 gen_bx(s, tmp);
9ee6e8bb
PB
8297 } else if (op1 == 3) {
8298 /* clz */
be5e7a76 8299 ARCH(5);
9ee6e8bb 8300 rd = (insn >> 12) & 0xf;
1497c961
PB
8301 tmp = load_reg(s, rm);
8302 gen_helper_clz(tmp, tmp);
8303 store_reg(s, rd, tmp);
9ee6e8bb
PB
8304 } else {
8305 goto illegal_op;
8306 }
8307 break;
8308 case 0x2:
8309 if (op1 == 1) {
8310 ARCH(5J); /* bxj */
8311 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8312 tmp = load_reg(s, rm);
8313 gen_bx(s, tmp);
9ee6e8bb
PB
8314 } else {
8315 goto illegal_op;
8316 }
8317 break;
8318 case 0x3:
8319 if (op1 != 1)
8320 goto illegal_op;
8321
be5e7a76 8322 ARCH(5);
9ee6e8bb 8323 /* branch link/exchange thumb (blx) */
d9ba4830 8324 tmp = load_reg(s, rm);
7d1b0095 8325 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8326 tcg_gen_movi_i32(tmp2, s->pc);
8327 store_reg(s, 14, tmp2);
8328 gen_bx(s, tmp);
9ee6e8bb 8329 break;
eb0ecd5a
WN
8330 case 0x4:
8331 {
8332 /* crc32/crc32c */
8333 uint32_t c = extract32(insn, 8, 4);
8334
8335 /* Check this CPU supports ARMv8 CRC instructions.
8336 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8337 * Bits 8, 10 and 11 should be zero.
8338 */
d614a513 8339 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8340 (c & 0xd) != 0) {
8341 goto illegal_op;
8342 }
8343
8344 rn = extract32(insn, 16, 4);
8345 rd = extract32(insn, 12, 4);
8346
8347 tmp = load_reg(s, rn);
8348 tmp2 = load_reg(s, rm);
aa633469
PM
8349 if (op1 == 0) {
8350 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8351 } else if (op1 == 1) {
8352 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8353 }
eb0ecd5a
WN
8354 tmp3 = tcg_const_i32(1 << op1);
8355 if (c & 0x2) {
8356 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8357 } else {
8358 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8359 }
8360 tcg_temp_free_i32(tmp2);
8361 tcg_temp_free_i32(tmp3);
8362 store_reg(s, rd, tmp);
8363 break;
8364 }
9ee6e8bb 8365 case 0x5: /* saturating add/subtract */
be5e7a76 8366 ARCH(5TE);
9ee6e8bb
PB
8367 rd = (insn >> 12) & 0xf;
8368 rn = (insn >> 16) & 0xf;
b40d0353 8369 tmp = load_reg(s, rm);
5e3f878a 8370 tmp2 = load_reg(s, rn);
9ee6e8bb 8371 if (op1 & 2)
9ef39277 8372 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8373 if (op1 & 1)
9ef39277 8374 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8375 else
9ef39277 8376 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8377 tcg_temp_free_i32(tmp2);
5e3f878a 8378 store_reg(s, rd, tmp);
9ee6e8bb 8379 break;
49e14940 8380 case 7:
d4a2dc67
PM
8381 {
8382 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
8383 switch (op1) {
8384 case 1:
8385 /* bkpt */
8386 ARCH(5);
8387 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8388 syn_aa32_bkpt(imm16, false),
8389 default_exception_el(s));
37e6456e
PM
8390 break;
8391 case 2:
8392 /* Hypervisor call (v7) */
8393 ARCH(7);
8394 if (IS_USER(s)) {
8395 goto illegal_op;
8396 }
8397 gen_hvc(s, imm16);
8398 break;
8399 case 3:
8400 /* Secure monitor call (v6+) */
8401 ARCH(6K);
8402 if (IS_USER(s)) {
8403 goto illegal_op;
8404 }
8405 gen_smc(s);
8406 break;
8407 default:
49e14940
AL
8408 goto illegal_op;
8409 }
9ee6e8bb 8410 break;
d4a2dc67 8411 }
9ee6e8bb
PB
8412 case 0x8: /* signed multiply */
8413 case 0xa:
8414 case 0xc:
8415 case 0xe:
be5e7a76 8416 ARCH(5TE);
9ee6e8bb
PB
8417 rs = (insn >> 8) & 0xf;
8418 rn = (insn >> 12) & 0xf;
8419 rd = (insn >> 16) & 0xf;
8420 if (op1 == 1) {
8421 /* (32 * 16) >> 16 */
5e3f878a
PB
8422 tmp = load_reg(s, rm);
8423 tmp2 = load_reg(s, rs);
9ee6e8bb 8424 if (sh & 4)
5e3f878a 8425 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8426 else
5e3f878a 8427 gen_sxth(tmp2);
a7812ae4
PB
8428 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8429 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8430 tmp = tcg_temp_new_i32();
ecc7b3aa 8431 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8432 tcg_temp_free_i64(tmp64);
9ee6e8bb 8433 if ((sh & 2) == 0) {
5e3f878a 8434 tmp2 = load_reg(s, rn);
9ef39277 8435 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8436 tcg_temp_free_i32(tmp2);
9ee6e8bb 8437 }
5e3f878a 8438 store_reg(s, rd, tmp);
9ee6e8bb
PB
8439 } else {
8440 /* 16 * 16 */
5e3f878a
PB
8441 tmp = load_reg(s, rm);
8442 tmp2 = load_reg(s, rs);
8443 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8444 tcg_temp_free_i32(tmp2);
9ee6e8bb 8445 if (op1 == 2) {
a7812ae4
PB
8446 tmp64 = tcg_temp_new_i64();
8447 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8448 tcg_temp_free_i32(tmp);
a7812ae4
PB
8449 gen_addq(s, tmp64, rn, rd);
8450 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8451 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8452 } else {
8453 if (op1 == 0) {
5e3f878a 8454 tmp2 = load_reg(s, rn);
9ef39277 8455 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8456 tcg_temp_free_i32(tmp2);
9ee6e8bb 8457 }
5e3f878a 8458 store_reg(s, rd, tmp);
9ee6e8bb
PB
8459 }
8460 }
8461 break;
8462 default:
8463 goto illegal_op;
8464 }
8465 } else if (((insn & 0x0e000000) == 0 &&
8466 (insn & 0x00000090) != 0x90) ||
8467 ((insn & 0x0e000000) == (1 << 25))) {
8468 int set_cc, logic_cc, shiftop;
8469
8470 op1 = (insn >> 21) & 0xf;
8471 set_cc = (insn >> 20) & 1;
8472 logic_cc = table_logic_cc[op1] & set_cc;
8473
8474 /* data processing instruction */
8475 if (insn & (1 << 25)) {
8476 /* immediate operand */
8477 val = insn & 0xff;
8478 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8479 if (shift) {
9ee6e8bb 8480 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8481 }
7d1b0095 8482 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8483 tcg_gen_movi_i32(tmp2, val);
8484 if (logic_cc && shift) {
8485 gen_set_CF_bit31(tmp2);
8486 }
9ee6e8bb
PB
8487 } else {
8488 /* register */
8489 rm = (insn) & 0xf;
e9bb4aa9 8490 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8491 shiftop = (insn >> 5) & 3;
8492 if (!(insn & (1 << 4))) {
8493 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8494 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8495 } else {
8496 rs = (insn >> 8) & 0xf;
8984bd2e 8497 tmp = load_reg(s, rs);
e9bb4aa9 8498 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8499 }
8500 }
8501 if (op1 != 0x0f && op1 != 0x0d) {
8502 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8503 tmp = load_reg(s, rn);
8504 } else {
39d5492a 8505 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8506 }
8507 rd = (insn >> 12) & 0xf;
8508 switch(op1) {
8509 case 0x00:
e9bb4aa9
JR
8510 tcg_gen_and_i32(tmp, tmp, tmp2);
8511 if (logic_cc) {
8512 gen_logic_CC(tmp);
8513 }
7dcc1f89 8514 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8515 break;
8516 case 0x01:
e9bb4aa9
JR
8517 tcg_gen_xor_i32(tmp, tmp, tmp2);
8518 if (logic_cc) {
8519 gen_logic_CC(tmp);
8520 }
7dcc1f89 8521 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8522 break;
8523 case 0x02:
8524 if (set_cc && rd == 15) {
8525 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8526 if (IS_USER(s)) {
9ee6e8bb 8527 goto illegal_op;
e9bb4aa9 8528 }
72485ec4 8529 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8530 gen_exception_return(s, tmp);
9ee6e8bb 8531 } else {
e9bb4aa9 8532 if (set_cc) {
72485ec4 8533 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8534 } else {
8535 tcg_gen_sub_i32(tmp, tmp, tmp2);
8536 }
7dcc1f89 8537 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8538 }
8539 break;
8540 case 0x03:
e9bb4aa9 8541 if (set_cc) {
72485ec4 8542 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8543 } else {
8544 tcg_gen_sub_i32(tmp, tmp2, tmp);
8545 }
7dcc1f89 8546 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8547 break;
8548 case 0x04:
e9bb4aa9 8549 if (set_cc) {
72485ec4 8550 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8551 } else {
8552 tcg_gen_add_i32(tmp, tmp, tmp2);
8553 }
7dcc1f89 8554 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8555 break;
8556 case 0x05:
e9bb4aa9 8557 if (set_cc) {
49b4c31e 8558 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8559 } else {
8560 gen_add_carry(tmp, tmp, tmp2);
8561 }
7dcc1f89 8562 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8563 break;
8564 case 0x06:
e9bb4aa9 8565 if (set_cc) {
2de68a49 8566 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8567 } else {
8568 gen_sub_carry(tmp, tmp, tmp2);
8569 }
7dcc1f89 8570 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8571 break;
8572 case 0x07:
e9bb4aa9 8573 if (set_cc) {
2de68a49 8574 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8575 } else {
8576 gen_sub_carry(tmp, tmp2, tmp);
8577 }
7dcc1f89 8578 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8579 break;
8580 case 0x08:
8581 if (set_cc) {
e9bb4aa9
JR
8582 tcg_gen_and_i32(tmp, tmp, tmp2);
8583 gen_logic_CC(tmp);
9ee6e8bb 8584 }
7d1b0095 8585 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8586 break;
8587 case 0x09:
8588 if (set_cc) {
e9bb4aa9
JR
8589 tcg_gen_xor_i32(tmp, tmp, tmp2);
8590 gen_logic_CC(tmp);
9ee6e8bb 8591 }
7d1b0095 8592 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8593 break;
8594 case 0x0a:
8595 if (set_cc) {
72485ec4 8596 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8597 }
7d1b0095 8598 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8599 break;
8600 case 0x0b:
8601 if (set_cc) {
72485ec4 8602 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8603 }
7d1b0095 8604 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8605 break;
8606 case 0x0c:
e9bb4aa9
JR
8607 tcg_gen_or_i32(tmp, tmp, tmp2);
8608 if (logic_cc) {
8609 gen_logic_CC(tmp);
8610 }
7dcc1f89 8611 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8612 break;
8613 case 0x0d:
8614 if (logic_cc && rd == 15) {
8615 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8616 if (IS_USER(s)) {
9ee6e8bb 8617 goto illegal_op;
e9bb4aa9
JR
8618 }
8619 gen_exception_return(s, tmp2);
9ee6e8bb 8620 } else {
e9bb4aa9
JR
8621 if (logic_cc) {
8622 gen_logic_CC(tmp2);
8623 }
7dcc1f89 8624 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8625 }
8626 break;
8627 case 0x0e:
f669df27 8628 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8629 if (logic_cc) {
8630 gen_logic_CC(tmp);
8631 }
7dcc1f89 8632 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8633 break;
8634 default:
8635 case 0x0f:
e9bb4aa9
JR
8636 tcg_gen_not_i32(tmp2, tmp2);
8637 if (logic_cc) {
8638 gen_logic_CC(tmp2);
8639 }
7dcc1f89 8640 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8641 break;
8642 }
e9bb4aa9 8643 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8644 tcg_temp_free_i32(tmp2);
e9bb4aa9 8645 }
9ee6e8bb
PB
8646 } else {
8647 /* other instructions */
8648 op1 = (insn >> 24) & 0xf;
8649 switch(op1) {
8650 case 0x0:
8651 case 0x1:
8652 /* multiplies, extra load/stores */
8653 sh = (insn >> 5) & 3;
8654 if (sh == 0) {
8655 if (op1 == 0x0) {
8656 rd = (insn >> 16) & 0xf;
8657 rn = (insn >> 12) & 0xf;
8658 rs = (insn >> 8) & 0xf;
8659 rm = (insn) & 0xf;
8660 op1 = (insn >> 20) & 0xf;
8661 switch (op1) {
8662 case 0: case 1: case 2: case 3: case 6:
8663 /* 32 bit mul */
5e3f878a
PB
8664 tmp = load_reg(s, rs);
8665 tmp2 = load_reg(s, rm);
8666 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8667 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8668 if (insn & (1 << 22)) {
8669 /* Subtract (mls) */
8670 ARCH(6T2);
5e3f878a
PB
8671 tmp2 = load_reg(s, rn);
8672 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8673 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8674 } else if (insn & (1 << 21)) {
8675 /* Add */
5e3f878a
PB
8676 tmp2 = load_reg(s, rn);
8677 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8678 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8679 }
8680 if (insn & (1 << 20))
5e3f878a
PB
8681 gen_logic_CC(tmp);
8682 store_reg(s, rd, tmp);
9ee6e8bb 8683 break;
8aac08b1
AJ
8684 case 4:
8685 /* 64 bit mul double accumulate (UMAAL) */
8686 ARCH(6);
8687 tmp = load_reg(s, rs);
8688 tmp2 = load_reg(s, rm);
8689 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8690 gen_addq_lo(s, tmp64, rn);
8691 gen_addq_lo(s, tmp64, rd);
8692 gen_storeq_reg(s, rn, rd, tmp64);
8693 tcg_temp_free_i64(tmp64);
8694 break;
8695 case 8: case 9: case 10: case 11:
8696 case 12: case 13: case 14: case 15:
8697 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8698 tmp = load_reg(s, rs);
8699 tmp2 = load_reg(s, rm);
8aac08b1 8700 if (insn & (1 << 22)) {
c9f10124 8701 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8702 } else {
c9f10124 8703 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8704 }
8705 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8706 TCGv_i32 al = load_reg(s, rn);
8707 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8708 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8709 tcg_temp_free_i32(al);
8710 tcg_temp_free_i32(ah);
9ee6e8bb 8711 }
8aac08b1 8712 if (insn & (1 << 20)) {
c9f10124 8713 gen_logicq_cc(tmp, tmp2);
8aac08b1 8714 }
c9f10124
RH
8715 store_reg(s, rn, tmp);
8716 store_reg(s, rd, tmp2);
9ee6e8bb 8717 break;
8aac08b1
AJ
8718 default:
8719 goto illegal_op;
9ee6e8bb
PB
8720 }
8721 } else {
8722 rn = (insn >> 16) & 0xf;
8723 rd = (insn >> 12) & 0xf;
8724 if (insn & (1 << 23)) {
8725 /* load/store exclusive */
2359bf80 8726 int op2 = (insn >> 8) & 3;
86753403 8727 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8728
8729 switch (op2) {
8730 case 0: /* lda/stl */
8731 if (op1 == 1) {
8732 goto illegal_op;
8733 }
8734 ARCH(8);
8735 break;
8736 case 1: /* reserved */
8737 goto illegal_op;
8738 case 2: /* ldaex/stlex */
8739 ARCH(8);
8740 break;
8741 case 3: /* ldrex/strex */
8742 if (op1) {
8743 ARCH(6K);
8744 } else {
8745 ARCH(6);
8746 }
8747 break;
8748 }
8749
3174f8e9 8750 addr = tcg_temp_local_new_i32();
98a46317 8751 load_reg_var(s, addr, rn);
2359bf80
MR
8752
8753 /* Since the emulation does not have barriers,
8754 the acquire/release semantics need no special
8755 handling */
8756 if (op2 == 0) {
8757 if (insn & (1 << 20)) {
8758 tmp = tcg_temp_new_i32();
8759 switch (op1) {
8760 case 0: /* lda */
12dcc321
PB
8761 gen_aa32_ld32u(s, tmp, addr,
8762 get_mem_index(s));
2359bf80
MR
8763 break;
8764 case 2: /* ldab */
12dcc321
PB
8765 gen_aa32_ld8u(s, tmp, addr,
8766 get_mem_index(s));
2359bf80
MR
8767 break;
8768 case 3: /* ldah */
12dcc321
PB
8769 gen_aa32_ld16u(s, tmp, addr,
8770 get_mem_index(s));
2359bf80
MR
8771 break;
8772 default:
8773 abort();
8774 }
8775 store_reg(s, rd, tmp);
8776 } else {
8777 rm = insn & 0xf;
8778 tmp = load_reg(s, rm);
8779 switch (op1) {
8780 case 0: /* stl */
12dcc321
PB
8781 gen_aa32_st32(s, tmp, addr,
8782 get_mem_index(s));
2359bf80
MR
8783 break;
8784 case 2: /* stlb */
12dcc321
PB
8785 gen_aa32_st8(s, tmp, addr,
8786 get_mem_index(s));
2359bf80
MR
8787 break;
8788 case 3: /* stlh */
12dcc321
PB
8789 gen_aa32_st16(s, tmp, addr,
8790 get_mem_index(s));
2359bf80
MR
8791 break;
8792 default:
8793 abort();
8794 }
8795 tcg_temp_free_i32(tmp);
8796 }
8797 } else if (insn & (1 << 20)) {
86753403
PB
8798 switch (op1) {
8799 case 0: /* ldrex */
426f5abc 8800 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8801 break;
8802 case 1: /* ldrexd */
426f5abc 8803 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8804 break;
8805 case 2: /* ldrexb */
426f5abc 8806 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8807 break;
8808 case 3: /* ldrexh */
426f5abc 8809 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8810 break;
8811 default:
8812 abort();
8813 }
9ee6e8bb
PB
8814 } else {
8815 rm = insn & 0xf;
86753403
PB
8816 switch (op1) {
8817 case 0: /* strex */
426f5abc 8818 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8819 break;
8820 case 1: /* strexd */
502e64fe 8821 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8822 break;
8823 case 2: /* strexb */
426f5abc 8824 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8825 break;
8826 case 3: /* strexh */
426f5abc 8827 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8828 break;
8829 default:
8830 abort();
8831 }
9ee6e8bb 8832 }
39d5492a 8833 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8834 } else {
8835 /* SWP instruction */
8836 rm = (insn) & 0xf;
8837
8984bd2e
PB
8838 /* ??? This is not really atomic. However we know
8839 we never have multiple CPUs running in parallel,
8840 so it is good enough. */
8841 addr = load_reg(s, rn);
8842 tmp = load_reg(s, rm);
5a839c0d 8843 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8844 if (insn & (1 << 22)) {
12dcc321
PB
8845 gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
8846 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8847 } else {
12dcc321
PB
8848 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
8849 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8850 }
5a839c0d 8851 tcg_temp_free_i32(tmp);
7d1b0095 8852 tcg_temp_free_i32(addr);
8984bd2e 8853 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8854 }
8855 }
8856 } else {
8857 int address_offset;
3960c336
PM
8858 bool load = insn & (1 << 20);
8859 bool doubleword = false;
9ee6e8bb
PB
8860 /* Misc load/store */
8861 rn = (insn >> 16) & 0xf;
8862 rd = (insn >> 12) & 0xf;
3960c336
PM
8863
8864 if (!load && (sh & 2)) {
8865 /* doubleword */
8866 ARCH(5TE);
8867 if (rd & 1) {
8868 /* UNPREDICTABLE; we choose to UNDEF */
8869 goto illegal_op;
8870 }
8871 load = (sh & 1) == 0;
8872 doubleword = true;
8873 }
8874
b0109805 8875 addr = load_reg(s, rn);
9ee6e8bb 8876 if (insn & (1 << 24))
b0109805 8877 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb 8878 address_offset = 0;
3960c336
PM
8879
8880 if (doubleword) {
8881 if (!load) {
9ee6e8bb 8882 /* store */
b0109805 8883 tmp = load_reg(s, rd);
12dcc321 8884 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8885 tcg_temp_free_i32(tmp);
b0109805
PB
8886 tcg_gen_addi_i32(addr, addr, 4);
8887 tmp = load_reg(s, rd + 1);
12dcc321 8888 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8889 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8890 } else {
8891 /* load */
5a839c0d 8892 tmp = tcg_temp_new_i32();
12dcc321 8893 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8894 store_reg(s, rd, tmp);
8895 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8896 tmp = tcg_temp_new_i32();
12dcc321 8897 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8898 rd++;
9ee6e8bb
PB
8899 }
8900 address_offset = -4;
3960c336
PM
8901 } else if (load) {
8902 /* load */
8903 tmp = tcg_temp_new_i32();
8904 switch (sh) {
8905 case 1:
12dcc321 8906 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
3960c336
PM
8907 break;
8908 case 2:
12dcc321 8909 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8910 break;
8911 default:
8912 case 3:
12dcc321 8913 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
3960c336
PM
8914 break;
8915 }
9ee6e8bb
PB
8916 } else {
8917 /* store */
b0109805 8918 tmp = load_reg(s, rd);
12dcc321 8919 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
5a839c0d 8920 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8921 }
8922 /* Perform base writeback before the loaded value to
8923 ensure correct behavior with overlapping index registers.
b6af0975 8924 ldrd with base writeback is undefined if the
9ee6e8bb
PB
8925 destination and index registers overlap. */
8926 if (!(insn & (1 << 24))) {
b0109805
PB
8927 gen_add_datah_offset(s, insn, address_offset, addr);
8928 store_reg(s, rn, addr);
9ee6e8bb
PB
8929 } else if (insn & (1 << 21)) {
8930 if (address_offset)
b0109805
PB
8931 tcg_gen_addi_i32(addr, addr, address_offset);
8932 store_reg(s, rn, addr);
8933 } else {
7d1b0095 8934 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8935 }
8936 if (load) {
8937 /* Complete the load. */
b0109805 8938 store_reg(s, rd, tmp);
9ee6e8bb
PB
8939 }
8940 }
8941 break;
8942 case 0x4:
8943 case 0x5:
8944 goto do_ldst;
8945 case 0x6:
8946 case 0x7:
8947 if (insn & (1 << 4)) {
8948 ARCH(6);
8949 /* Armv6 Media instructions. */
8950 rm = insn & 0xf;
8951 rn = (insn >> 16) & 0xf;
2c0262af 8952 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8953 rs = (insn >> 8) & 0xf;
8954 switch ((insn >> 23) & 3) {
8955 case 0: /* Parallel add/subtract. */
8956 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8957 tmp = load_reg(s, rn);
8958 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8959 sh = (insn >> 5) & 7;
8960 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8961 goto illegal_op;
6ddbc6e4 8962 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8963 tcg_temp_free_i32(tmp2);
6ddbc6e4 8964 store_reg(s, rd, tmp);
9ee6e8bb
PB
8965 break;
8966 case 1:
8967 if ((insn & 0x00700020) == 0) {
6c95676b 8968 /* Halfword pack. */
3670669c
PB
8969 tmp = load_reg(s, rn);
8970 tmp2 = load_reg(s, rm);
9ee6e8bb 8971 shift = (insn >> 7) & 0x1f;
3670669c
PB
8972 if (insn & (1 << 6)) {
8973 /* pkhtb */
22478e79
AZ
8974 if (shift == 0)
8975 shift = 31;
8976 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8977 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8978 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8979 } else {
8980 /* pkhbt */
22478e79
AZ
8981 if (shift)
8982 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8983 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8984 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8985 }
8986 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8987 tcg_temp_free_i32(tmp2);
3670669c 8988 store_reg(s, rd, tmp);
9ee6e8bb
PB
8989 } else if ((insn & 0x00200020) == 0x00200000) {
8990 /* [us]sat */
6ddbc6e4 8991 tmp = load_reg(s, rm);
9ee6e8bb
PB
8992 shift = (insn >> 7) & 0x1f;
8993 if (insn & (1 << 6)) {
8994 if (shift == 0)
8995 shift = 31;
6ddbc6e4 8996 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8997 } else {
6ddbc6e4 8998 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8999 }
9000 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9001 tmp2 = tcg_const_i32(sh);
9002 if (insn & (1 << 22))
9ef39277 9003 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9004 else
9ef39277 9005 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9006 tcg_temp_free_i32(tmp2);
6ddbc6e4 9007 store_reg(s, rd, tmp);
9ee6e8bb
PB
9008 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9009 /* [us]sat16 */
6ddbc6e4 9010 tmp = load_reg(s, rm);
9ee6e8bb 9011 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9012 tmp2 = tcg_const_i32(sh);
9013 if (insn & (1 << 22))
9ef39277 9014 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9015 else
9ef39277 9016 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9017 tcg_temp_free_i32(tmp2);
6ddbc6e4 9018 store_reg(s, rd, tmp);
9ee6e8bb
PB
9019 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9020 /* Select bytes. */
6ddbc6e4
PB
9021 tmp = load_reg(s, rn);
9022 tmp2 = load_reg(s, rm);
7d1b0095 9023 tmp3 = tcg_temp_new_i32();
0ecb72a5 9024 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9025 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9026 tcg_temp_free_i32(tmp3);
9027 tcg_temp_free_i32(tmp2);
6ddbc6e4 9028 store_reg(s, rd, tmp);
9ee6e8bb 9029 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9030 tmp = load_reg(s, rm);
9ee6e8bb 9031 shift = (insn >> 10) & 3;
1301f322 9032 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9033 rotate, a shift is sufficient. */
9034 if (shift != 0)
f669df27 9035 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9036 op1 = (insn >> 20) & 7;
9037 switch (op1) {
5e3f878a
PB
9038 case 0: gen_sxtb16(tmp); break;
9039 case 2: gen_sxtb(tmp); break;
9040 case 3: gen_sxth(tmp); break;
9041 case 4: gen_uxtb16(tmp); break;
9042 case 6: gen_uxtb(tmp); break;
9043 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9044 default: goto illegal_op;
9045 }
9046 if (rn != 15) {
5e3f878a 9047 tmp2 = load_reg(s, rn);
9ee6e8bb 9048 if ((op1 & 3) == 0) {
5e3f878a 9049 gen_add16(tmp, tmp2);
9ee6e8bb 9050 } else {
5e3f878a 9051 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9052 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9053 }
9054 }
6c95676b 9055 store_reg(s, rd, tmp);
9ee6e8bb
PB
9056 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9057 /* rev */
b0109805 9058 tmp = load_reg(s, rm);
9ee6e8bb
PB
9059 if (insn & (1 << 22)) {
9060 if (insn & (1 << 7)) {
b0109805 9061 gen_revsh(tmp);
9ee6e8bb
PB
9062 } else {
9063 ARCH(6T2);
b0109805 9064 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9065 }
9066 } else {
9067 if (insn & (1 << 7))
b0109805 9068 gen_rev16(tmp);
9ee6e8bb 9069 else
66896cb8 9070 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9071 }
b0109805 9072 store_reg(s, rd, tmp);
9ee6e8bb
PB
9073 } else {
9074 goto illegal_op;
9075 }
9076 break;
9077 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9078 switch ((insn >> 20) & 0x7) {
9079 case 5:
9080 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9081 /* op2 not 00x or 11x : UNDEF */
9082 goto illegal_op;
9083 }
838fa72d
AJ
9084 /* Signed multiply most significant [accumulate].
9085 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9086 tmp = load_reg(s, rm);
9087 tmp2 = load_reg(s, rs);
a7812ae4 9088 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9089
955a7dd5 9090 if (rd != 15) {
838fa72d 9091 tmp = load_reg(s, rd);
9ee6e8bb 9092 if (insn & (1 << 6)) {
838fa72d 9093 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9094 } else {
838fa72d 9095 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9096 }
9097 }
838fa72d
AJ
9098 if (insn & (1 << 5)) {
9099 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9100 }
9101 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9102 tmp = tcg_temp_new_i32();
ecc7b3aa 9103 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9104 tcg_temp_free_i64(tmp64);
955a7dd5 9105 store_reg(s, rn, tmp);
41e9564d
PM
9106 break;
9107 case 0:
9108 case 4:
9109 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9110 if (insn & (1 << 7)) {
9111 goto illegal_op;
9112 }
9113 tmp = load_reg(s, rm);
9114 tmp2 = load_reg(s, rs);
9ee6e8bb 9115 if (insn & (1 << 5))
5e3f878a
PB
9116 gen_swap_half(tmp2);
9117 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9118 if (insn & (1 << 22)) {
5e3f878a 9119 /* smlald, smlsld */
33bbd75a
PC
9120 TCGv_i64 tmp64_2;
9121
a7812ae4 9122 tmp64 = tcg_temp_new_i64();
33bbd75a 9123 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9124 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9125 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9126 tcg_temp_free_i32(tmp);
33bbd75a
PC
9127 tcg_temp_free_i32(tmp2);
9128 if (insn & (1 << 6)) {
9129 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9130 } else {
9131 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9132 }
9133 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9134 gen_addq(s, tmp64, rd, rn);
9135 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9136 tcg_temp_free_i64(tmp64);
9ee6e8bb 9137 } else {
5e3f878a 9138 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9139 if (insn & (1 << 6)) {
9140 /* This subtraction cannot overflow. */
9141 tcg_gen_sub_i32(tmp, tmp, tmp2);
9142 } else {
9143 /* This addition cannot overflow 32 bits;
9144 * however it may overflow considered as a
9145 * signed operation, in which case we must set
9146 * the Q flag.
9147 */
9148 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9149 }
9150 tcg_temp_free_i32(tmp2);
22478e79 9151 if (rd != 15)
9ee6e8bb 9152 {
22478e79 9153 tmp2 = load_reg(s, rd);
9ef39277 9154 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9155 tcg_temp_free_i32(tmp2);
9ee6e8bb 9156 }
22478e79 9157 store_reg(s, rn, tmp);
9ee6e8bb 9158 }
41e9564d 9159 break;
b8b8ea05
PM
9160 case 1:
9161 case 3:
9162 /* SDIV, UDIV */
d614a513 9163 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9164 goto illegal_op;
9165 }
9166 if (((insn >> 5) & 7) || (rd != 15)) {
9167 goto illegal_op;
9168 }
9169 tmp = load_reg(s, rm);
9170 tmp2 = load_reg(s, rs);
9171 if (insn & (1 << 21)) {
9172 gen_helper_udiv(tmp, tmp, tmp2);
9173 } else {
9174 gen_helper_sdiv(tmp, tmp, tmp2);
9175 }
9176 tcg_temp_free_i32(tmp2);
9177 store_reg(s, rn, tmp);
9178 break;
41e9564d
PM
9179 default:
9180 goto illegal_op;
9ee6e8bb
PB
9181 }
9182 break;
9183 case 3:
9184 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9185 switch (op1) {
9186 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9187 ARCH(6);
9188 tmp = load_reg(s, rm);
9189 tmp2 = load_reg(s, rs);
9190 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9191 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9192 if (rd != 15) {
9193 tmp2 = load_reg(s, rd);
6ddbc6e4 9194 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9195 tcg_temp_free_i32(tmp2);
9ee6e8bb 9196 }
ded9d295 9197 store_reg(s, rn, tmp);
9ee6e8bb
PB
9198 break;
9199 case 0x20: case 0x24: case 0x28: case 0x2c:
9200 /* Bitfield insert/clear. */
9201 ARCH(6T2);
9202 shift = (insn >> 7) & 0x1f;
9203 i = (insn >> 16) & 0x1f;
45140a57
KB
9204 if (i < shift) {
9205 /* UNPREDICTABLE; we choose to UNDEF */
9206 goto illegal_op;
9207 }
9ee6e8bb
PB
9208 i = i + 1 - shift;
9209 if (rm == 15) {
7d1b0095 9210 tmp = tcg_temp_new_i32();
5e3f878a 9211 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9212 } else {
5e3f878a 9213 tmp = load_reg(s, rm);
9ee6e8bb
PB
9214 }
9215 if (i != 32) {
5e3f878a 9216 tmp2 = load_reg(s, rd);
d593c48e 9217 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9218 tcg_temp_free_i32(tmp2);
9ee6e8bb 9219 }
5e3f878a 9220 store_reg(s, rd, tmp);
9ee6e8bb
PB
9221 break;
9222 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9223 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9224 ARCH(6T2);
5e3f878a 9225 tmp = load_reg(s, rm);
9ee6e8bb
PB
9226 shift = (insn >> 7) & 0x1f;
9227 i = ((insn >> 16) & 0x1f) + 1;
9228 if (shift + i > 32)
9229 goto illegal_op;
9230 if (i < 32) {
9231 if (op1 & 0x20) {
5e3f878a 9232 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 9233 } else {
5e3f878a 9234 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
9235 }
9236 }
5e3f878a 9237 store_reg(s, rd, tmp);
9ee6e8bb
PB
9238 break;
9239 default:
9240 goto illegal_op;
9241 }
9242 break;
9243 }
9244 break;
9245 }
9246 do_ldst:
9247 /* Check for undefined extension instructions
9248 * per the ARM Bible IE:
9249 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9250 */
9251 sh = (0xf << 20) | (0xf << 4);
9252 if (op1 == 0x7 && ((insn & sh) == sh))
9253 {
9254 goto illegal_op;
9255 }
9256 /* load/store byte/word */
9257 rn = (insn >> 16) & 0xf;
9258 rd = (insn >> 12) & 0xf;
b0109805 9259 tmp2 = load_reg(s, rn);
a99caa48
PM
9260 if ((insn & 0x01200000) == 0x00200000) {
9261 /* ldrt/strt */
579d21cc 9262 i = get_a32_user_mem_index(s);
a99caa48
PM
9263 } else {
9264 i = get_mem_index(s);
9265 }
9ee6e8bb 9266 if (insn & (1 << 24))
b0109805 9267 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9268 if (insn & (1 << 20)) {
9269 /* load */
5a839c0d 9270 tmp = tcg_temp_new_i32();
9ee6e8bb 9271 if (insn & (1 << 22)) {
12dcc321 9272 gen_aa32_ld8u(s, tmp, tmp2, i);
9ee6e8bb 9273 } else {
12dcc321 9274 gen_aa32_ld32u(s, tmp, tmp2, i);
9ee6e8bb 9275 }
9ee6e8bb
PB
9276 } else {
9277 /* store */
b0109805 9278 tmp = load_reg(s, rd);
5a839c0d 9279 if (insn & (1 << 22)) {
12dcc321 9280 gen_aa32_st8(s, tmp, tmp2, i);
5a839c0d 9281 } else {
12dcc321 9282 gen_aa32_st32(s, tmp, tmp2, i);
5a839c0d
PM
9283 }
9284 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9285 }
9286 if (!(insn & (1 << 24))) {
b0109805
PB
9287 gen_add_data_offset(s, insn, tmp2);
9288 store_reg(s, rn, tmp2);
9289 } else if (insn & (1 << 21)) {
9290 store_reg(s, rn, tmp2);
9291 } else {
7d1b0095 9292 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9293 }
9294 if (insn & (1 << 20)) {
9295 /* Complete the load. */
7dcc1f89 9296 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9297 }
9298 break;
9299 case 0x08:
9300 case 0x09:
9301 {
da3e53dd
PM
9302 int j, n, loaded_base;
9303 bool exc_return = false;
9304 bool is_load = extract32(insn, 20, 1);
9305 bool user = false;
39d5492a 9306 TCGv_i32 loaded_var;
9ee6e8bb
PB
9307 /* load/store multiple words */
9308 /* XXX: store correct base if write back */
9ee6e8bb 9309 if (insn & (1 << 22)) {
da3e53dd 9310 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9311 if (IS_USER(s))
9312 goto illegal_op; /* only usable in supervisor mode */
9313
da3e53dd
PM
9314 if (is_load && extract32(insn, 15, 1)) {
9315 exc_return = true;
9316 } else {
9317 user = true;
9318 }
9ee6e8bb
PB
9319 }
9320 rn = (insn >> 16) & 0xf;
b0109805 9321 addr = load_reg(s, rn);
9ee6e8bb
PB
9322
9323 /* compute total size */
9324 loaded_base = 0;
39d5492a 9325 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9326 n = 0;
9327 for(i=0;i<16;i++) {
9328 if (insn & (1 << i))
9329 n++;
9330 }
9331 /* XXX: test invalid n == 0 case ? */
9332 if (insn & (1 << 23)) {
9333 if (insn & (1 << 24)) {
9334 /* pre increment */
b0109805 9335 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9336 } else {
9337 /* post increment */
9338 }
9339 } else {
9340 if (insn & (1 << 24)) {
9341 /* pre decrement */
b0109805 9342 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9343 } else {
9344 /* post decrement */
9345 if (n != 1)
b0109805 9346 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9347 }
9348 }
9349 j = 0;
9350 for(i=0;i<16;i++) {
9351 if (insn & (1 << i)) {
da3e53dd 9352 if (is_load) {
9ee6e8bb 9353 /* load */
5a839c0d 9354 tmp = tcg_temp_new_i32();
12dcc321 9355 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9356 if (user) {
b75263d6 9357 tmp2 = tcg_const_i32(i);
1ce94f81 9358 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9359 tcg_temp_free_i32(tmp2);
7d1b0095 9360 tcg_temp_free_i32(tmp);
9ee6e8bb 9361 } else if (i == rn) {
b0109805 9362 loaded_var = tmp;
9ee6e8bb
PB
9363 loaded_base = 1;
9364 } else {
7dcc1f89 9365 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9366 }
9367 } else {
9368 /* store */
9369 if (i == 15) {
9370 /* special case: r15 = PC + 8 */
9371 val = (long)s->pc + 4;
7d1b0095 9372 tmp = tcg_temp_new_i32();
b0109805 9373 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9374 } else if (user) {
7d1b0095 9375 tmp = tcg_temp_new_i32();
b75263d6 9376 tmp2 = tcg_const_i32(i);
9ef39277 9377 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9378 tcg_temp_free_i32(tmp2);
9ee6e8bb 9379 } else {
b0109805 9380 tmp = load_reg(s, i);
9ee6e8bb 9381 }
12dcc321 9382 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9383 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9384 }
9385 j++;
9386 /* no need to add after the last transfer */
9387 if (j != n)
b0109805 9388 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9389 }
9390 }
9391 if (insn & (1 << 21)) {
9392 /* write back */
9393 if (insn & (1 << 23)) {
9394 if (insn & (1 << 24)) {
9395 /* pre increment */
9396 } else {
9397 /* post increment */
b0109805 9398 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9399 }
9400 } else {
9401 if (insn & (1 << 24)) {
9402 /* pre decrement */
9403 if (n != 1)
b0109805 9404 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9405 } else {
9406 /* post decrement */
b0109805 9407 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9408 }
9409 }
b0109805
PB
9410 store_reg(s, rn, addr);
9411 } else {
7d1b0095 9412 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9413 }
9414 if (loaded_base) {
b0109805 9415 store_reg(s, rn, loaded_var);
9ee6e8bb 9416 }
da3e53dd 9417 if (exc_return) {
9ee6e8bb 9418 /* Restore CPSR from SPSR. */
d9ba4830 9419 tmp = load_cpu_field(spsr);
235ea1f5 9420 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9421 tcg_temp_free_i32(tmp);
577bf808 9422 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9423 }
9424 }
9425 break;
9426 case 0xa:
9427 case 0xb:
9428 {
9429 int32_t offset;
9430
9431 /* branch (and link) */
9432 val = (int32_t)s->pc;
9433 if (insn & (1 << 24)) {
7d1b0095 9434 tmp = tcg_temp_new_i32();
5e3f878a
PB
9435 tcg_gen_movi_i32(tmp, val);
9436 store_reg(s, 14, tmp);
9ee6e8bb 9437 }
534df156
PM
9438 offset = sextract32(insn << 2, 0, 26);
9439 val += offset + 4;
9ee6e8bb
PB
9440 gen_jmp(s, val);
9441 }
9442 break;
9443 case 0xc:
9444 case 0xd:
9445 case 0xe:
6a57f3eb
WN
9446 if (((insn >> 8) & 0xe) == 10) {
9447 /* VFP. */
7dcc1f89 9448 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9449 goto illegal_op;
9450 }
7dcc1f89 9451 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9452 /* Coprocessor. */
9ee6e8bb 9453 goto illegal_op;
6a57f3eb 9454 }
9ee6e8bb
PB
9455 break;
9456 case 0xf:
9457 /* swi */
eaed129d 9458 gen_set_pc_im(s, s->pc);
d4a2dc67 9459 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9460 s->is_jmp = DISAS_SWI;
9461 break;
9462 default:
9463 illegal_op:
73710361
GB
9464 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9465 default_exception_el(s));
9ee6e8bb
PB
9466 break;
9467 }
9468 }
9469}
9470
/* Return true (nonzero) if OP encodes a Thumb-2 logical operation.
 * In the Thumb-2 data-processing encoding, opcodes 0..7 are the
 * logical ops (AND, BIC, ORR, ORN, EOR, ...); 8 and above are
 * arithmetic.
 */
static int
thumb2_logic_op(int op)
{
    if (op < 8) {
        return 1;
    }
    return 0;
}
9477
9478/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9479 then set condition code flags based on the result of the operation.
9480 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9481 to the high bit of T1.
9482 Returns zero if the opcode is valid. */
9483
9484static int
39d5492a
PM
9485gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9486 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9487{
9488 int logic_cc;
9489
9490 logic_cc = 0;
9491 switch (op) {
9492 case 0: /* and */
396e467c 9493 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9494 logic_cc = conds;
9495 break;
9496 case 1: /* bic */
f669df27 9497 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9498 logic_cc = conds;
9499 break;
9500 case 2: /* orr */
396e467c 9501 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9502 logic_cc = conds;
9503 break;
9504 case 3: /* orn */
29501f1b 9505 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9506 logic_cc = conds;
9507 break;
9508 case 4: /* eor */
396e467c 9509 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9510 logic_cc = conds;
9511 break;
9512 case 8: /* add */
9513 if (conds)
72485ec4 9514 gen_add_CC(t0, t0, t1);
9ee6e8bb 9515 else
396e467c 9516 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9517 break;
9518 case 10: /* adc */
9519 if (conds)
49b4c31e 9520 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9521 else
396e467c 9522 gen_adc(t0, t1);
9ee6e8bb
PB
9523 break;
9524 case 11: /* sbc */
2de68a49
RH
9525 if (conds) {
9526 gen_sbc_CC(t0, t0, t1);
9527 } else {
396e467c 9528 gen_sub_carry(t0, t0, t1);
2de68a49 9529 }
9ee6e8bb
PB
9530 break;
9531 case 13: /* sub */
9532 if (conds)
72485ec4 9533 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9534 else
396e467c 9535 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9536 break;
9537 case 14: /* rsb */
9538 if (conds)
72485ec4 9539 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9540 else
396e467c 9541 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9542 break;
9543 default: /* 5, 6, 7, 9, 12, 15. */
9544 return 1;
9545 }
9546 if (logic_cc) {
396e467c 9547 gen_logic_CC(t0);
9ee6e8bb 9548 if (shifter_out)
396e467c 9549 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9550 }
9551 return 0;
9552}
9553
9554/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9555 is not legal. */
0ecb72a5 9556static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9557{
b0109805 9558 uint32_t insn, imm, shift, offset;
9ee6e8bb 9559 uint32_t rd, rn, rm, rs;
39d5492a
PM
9560 TCGv_i32 tmp;
9561 TCGv_i32 tmp2;
9562 TCGv_i32 tmp3;
9563 TCGv_i32 addr;
a7812ae4 9564 TCGv_i64 tmp64;
9ee6e8bb
PB
9565 int op;
9566 int shiftop;
9567 int conds;
9568 int logic_cc;
9569
d614a513
PM
9570 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9571 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9572 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9573 16-bit instructions to get correct prefetch abort behavior. */
9574 insn = insn_hw1;
9575 if ((insn & (1 << 12)) == 0) {
be5e7a76 9576 ARCH(5);
9ee6e8bb
PB
9577 /* Second half of blx. */
9578 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9579 tmp = load_reg(s, 14);
9580 tcg_gen_addi_i32(tmp, tmp, offset);
9581 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9582
7d1b0095 9583 tmp2 = tcg_temp_new_i32();
b0109805 9584 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9585 store_reg(s, 14, tmp2);
9586 gen_bx(s, tmp);
9ee6e8bb
PB
9587 return 0;
9588 }
9589 if (insn & (1 << 11)) {
9590 /* Second half of bl. */
9591 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9592 tmp = load_reg(s, 14);
6a0d8a1d 9593 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9594
7d1b0095 9595 tmp2 = tcg_temp_new_i32();
b0109805 9596 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9597 store_reg(s, 14, tmp2);
9598 gen_bx(s, tmp);
9ee6e8bb
PB
9599 return 0;
9600 }
9601 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9602 /* Instruction spans a page boundary. Implement it as two
9603 16-bit instructions in case the second half causes an
9604 prefetch abort. */
9605 offset = ((int32_t)insn << 21) >> 9;
396e467c 9606 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9607 return 0;
9608 }
9609 /* Fall through to 32-bit decode. */
9610 }
9611
f9fd40eb 9612 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9613 s->pc += 2;
9614 insn |= (uint32_t)insn_hw1 << 16;
9615
9616 if ((insn & 0xf800e800) != 0xf000e800) {
9617 ARCH(6T2);
9618 }
9619
9620 rn = (insn >> 16) & 0xf;
9621 rs = (insn >> 12) & 0xf;
9622 rd = (insn >> 8) & 0xf;
9623 rm = insn & 0xf;
9624 switch ((insn >> 25) & 0xf) {
9625 case 0: case 1: case 2: case 3:
9626 /* 16-bit instructions. Should never happen. */
9627 abort();
9628 case 4:
9629 if (insn & (1 << 22)) {
9630 /* Other load/store, table branch. */
9631 if (insn & 0x01200000) {
9632 /* Load/store doubleword. */
9633 if (rn == 15) {
7d1b0095 9634 addr = tcg_temp_new_i32();
b0109805 9635 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9636 } else {
b0109805 9637 addr = load_reg(s, rn);
9ee6e8bb
PB
9638 }
9639 offset = (insn & 0xff) * 4;
9640 if ((insn & (1 << 23)) == 0)
9641 offset = -offset;
9642 if (insn & (1 << 24)) {
b0109805 9643 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9644 offset = 0;
9645 }
9646 if (insn & (1 << 20)) {
9647 /* ldrd */
e2592fad 9648 tmp = tcg_temp_new_i32();
12dcc321 9649 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9650 store_reg(s, rs, tmp);
9651 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9652 tmp = tcg_temp_new_i32();
12dcc321 9653 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9654 store_reg(s, rd, tmp);
9ee6e8bb
PB
9655 } else {
9656 /* strd */
b0109805 9657 tmp = load_reg(s, rs);
12dcc321 9658 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9659 tcg_temp_free_i32(tmp);
b0109805
PB
9660 tcg_gen_addi_i32(addr, addr, 4);
9661 tmp = load_reg(s, rd);
12dcc321 9662 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9663 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9664 }
9665 if (insn & (1 << 21)) {
9666 /* Base writeback. */
9667 if (rn == 15)
9668 goto illegal_op;
b0109805
PB
9669 tcg_gen_addi_i32(addr, addr, offset - 4);
9670 store_reg(s, rn, addr);
9671 } else {
7d1b0095 9672 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9673 }
9674 } else if ((insn & (1 << 23)) == 0) {
9675 /* Load/store exclusive word. */
39d5492a 9676 addr = tcg_temp_local_new_i32();
98a46317 9677 load_reg_var(s, addr, rn);
426f5abc 9678 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9679 if (insn & (1 << 20)) {
426f5abc 9680 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9681 } else {
426f5abc 9682 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9683 }
39d5492a 9684 tcg_temp_free_i32(addr);
2359bf80 9685 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9686 /* Table Branch. */
9687 if (rn == 15) {
7d1b0095 9688 addr = tcg_temp_new_i32();
b0109805 9689 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9690 } else {
b0109805 9691 addr = load_reg(s, rn);
9ee6e8bb 9692 }
b26eefb6 9693 tmp = load_reg(s, rm);
b0109805 9694 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9695 if (insn & (1 << 4)) {
9696 /* tbh */
b0109805 9697 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9698 tcg_temp_free_i32(tmp);
e2592fad 9699 tmp = tcg_temp_new_i32();
12dcc321 9700 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9701 } else { /* tbb */
7d1b0095 9702 tcg_temp_free_i32(tmp);
e2592fad 9703 tmp = tcg_temp_new_i32();
12dcc321 9704 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9705 }
7d1b0095 9706 tcg_temp_free_i32(addr);
b0109805
PB
9707 tcg_gen_shli_i32(tmp, tmp, 1);
9708 tcg_gen_addi_i32(tmp, tmp, s->pc);
9709 store_reg(s, 15, tmp);
9ee6e8bb 9710 } else {
2359bf80 9711 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9712 op = (insn >> 4) & 0x3;
2359bf80
MR
9713 switch (op2) {
9714 case 0:
426f5abc 9715 goto illegal_op;
2359bf80
MR
9716 case 1:
9717 /* Load/store exclusive byte/halfword/doubleword */
9718 if (op == 2) {
9719 goto illegal_op;
9720 }
9721 ARCH(7);
9722 break;
9723 case 2:
9724 /* Load-acquire/store-release */
9725 if (op == 3) {
9726 goto illegal_op;
9727 }
9728 /* Fall through */
9729 case 3:
9730 /* Load-acquire/store-release exclusive */
9731 ARCH(8);
9732 break;
426f5abc 9733 }
39d5492a 9734 addr = tcg_temp_local_new_i32();
98a46317 9735 load_reg_var(s, addr, rn);
2359bf80
MR
9736 if (!(op2 & 1)) {
9737 if (insn & (1 << 20)) {
9738 tmp = tcg_temp_new_i32();
9739 switch (op) {
9740 case 0: /* ldab */
12dcc321 9741 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9742 break;
9743 case 1: /* ldah */
12dcc321 9744 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9745 break;
9746 case 2: /* lda */
12dcc321 9747 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9748 break;
9749 default:
9750 abort();
9751 }
9752 store_reg(s, rs, tmp);
9753 } else {
9754 tmp = load_reg(s, rs);
9755 switch (op) {
9756 case 0: /* stlb */
12dcc321 9757 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9758 break;
9759 case 1: /* stlh */
12dcc321 9760 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9761 break;
9762 case 2: /* stl */
12dcc321 9763 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
2359bf80
MR
9764 break;
9765 default:
9766 abort();
9767 }
9768 tcg_temp_free_i32(tmp);
9769 }
9770 } else if (insn & (1 << 20)) {
426f5abc 9771 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9772 } else {
426f5abc 9773 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9774 }
39d5492a 9775 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9776 }
9777 } else {
9778 /* Load/store multiple, RFE, SRS. */
9779 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9780 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9781 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9782 goto illegal_op;
00115976 9783 }
9ee6e8bb
PB
9784 if (insn & (1 << 20)) {
9785 /* rfe */
b0109805
PB
9786 addr = load_reg(s, rn);
9787 if ((insn & (1 << 24)) == 0)
9788 tcg_gen_addi_i32(addr, addr, -8);
9789 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9790 tmp = tcg_temp_new_i32();
12dcc321 9791 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9792 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9793 tmp2 = tcg_temp_new_i32();
12dcc321 9794 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9795 if (insn & (1 << 21)) {
9796 /* Base writeback. */
b0109805
PB
9797 if (insn & (1 << 24)) {
9798 tcg_gen_addi_i32(addr, addr, 4);
9799 } else {
9800 tcg_gen_addi_i32(addr, addr, -4);
9801 }
9802 store_reg(s, rn, addr);
9803 } else {
7d1b0095 9804 tcg_temp_free_i32(addr);
9ee6e8bb 9805 }
b0109805 9806 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9807 } else {
9808 /* srs */
81465888
PM
9809 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9810 insn & (1 << 21));
9ee6e8bb
PB
9811 }
9812 } else {
5856d44e 9813 int i, loaded_base = 0;
39d5492a 9814 TCGv_i32 loaded_var;
9ee6e8bb 9815 /* Load/store multiple. */
b0109805 9816 addr = load_reg(s, rn);
9ee6e8bb
PB
9817 offset = 0;
9818 for (i = 0; i < 16; i++) {
9819 if (insn & (1 << i))
9820 offset += 4;
9821 }
9822 if (insn & (1 << 24)) {
b0109805 9823 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9824 }
9825
39d5492a 9826 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9827 for (i = 0; i < 16; i++) {
9828 if ((insn & (1 << i)) == 0)
9829 continue;
9830 if (insn & (1 << 20)) {
9831 /* Load. */
e2592fad 9832 tmp = tcg_temp_new_i32();
12dcc321 9833 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9834 if (i == 15) {
b0109805 9835 gen_bx(s, tmp);
5856d44e
YO
9836 } else if (i == rn) {
9837 loaded_var = tmp;
9838 loaded_base = 1;
9ee6e8bb 9839 } else {
b0109805 9840 store_reg(s, i, tmp);
9ee6e8bb
PB
9841 }
9842 } else {
9843 /* Store. */
b0109805 9844 tmp = load_reg(s, i);
12dcc321 9845 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9846 tcg_temp_free_i32(tmp);
9ee6e8bb 9847 }
b0109805 9848 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9849 }
5856d44e
YO
9850 if (loaded_base) {
9851 store_reg(s, rn, loaded_var);
9852 }
9ee6e8bb
PB
9853 if (insn & (1 << 21)) {
9854 /* Base register writeback. */
9855 if (insn & (1 << 24)) {
b0109805 9856 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9857 }
9858 /* Fault if writeback register is in register list. */
9859 if (insn & (1 << rn))
9860 goto illegal_op;
b0109805
PB
9861 store_reg(s, rn, addr);
9862 } else {
7d1b0095 9863 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9864 }
9865 }
9866 }
9867 break;
2af9ab77
JB
9868 case 5:
9869
9ee6e8bb 9870 op = (insn >> 21) & 0xf;
2af9ab77 9871 if (op == 6) {
62b44f05
AR
9872 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9873 goto illegal_op;
9874 }
2af9ab77
JB
9875 /* Halfword pack. */
9876 tmp = load_reg(s, rn);
9877 tmp2 = load_reg(s, rm);
9878 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9879 if (insn & (1 << 5)) {
9880 /* pkhtb */
9881 if (shift == 0)
9882 shift = 31;
9883 tcg_gen_sari_i32(tmp2, tmp2, shift);
9884 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9885 tcg_gen_ext16u_i32(tmp2, tmp2);
9886 } else {
9887 /* pkhbt */
9888 if (shift)
9889 tcg_gen_shli_i32(tmp2, tmp2, shift);
9890 tcg_gen_ext16u_i32(tmp, tmp);
9891 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9892 }
9893 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9894 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9895 store_reg(s, rd, tmp);
9896 } else {
2af9ab77
JB
9897 /* Data processing register constant shift. */
9898 if (rn == 15) {
7d1b0095 9899 tmp = tcg_temp_new_i32();
2af9ab77
JB
9900 tcg_gen_movi_i32(tmp, 0);
9901 } else {
9902 tmp = load_reg(s, rn);
9903 }
9904 tmp2 = load_reg(s, rm);
9905
9906 shiftop = (insn >> 4) & 3;
9907 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9908 conds = (insn & (1 << 20)) != 0;
9909 logic_cc = (conds && thumb2_logic_op(op));
9910 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9911 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9912 goto illegal_op;
7d1b0095 9913 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9914 if (rd != 15) {
9915 store_reg(s, rd, tmp);
9916 } else {
7d1b0095 9917 tcg_temp_free_i32(tmp);
2af9ab77 9918 }
3174f8e9 9919 }
9ee6e8bb
PB
9920 break;
9921 case 13: /* Misc data processing. */
9922 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9923 if (op < 4 && (insn & 0xf000) != 0xf000)
9924 goto illegal_op;
9925 switch (op) {
9926 case 0: /* Register controlled shift. */
8984bd2e
PB
9927 tmp = load_reg(s, rn);
9928 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9929 if ((insn & 0x70) != 0)
9930 goto illegal_op;
9931 op = (insn >> 21) & 3;
8984bd2e
PB
9932 logic_cc = (insn & (1 << 20)) != 0;
9933 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9934 if (logic_cc)
9935 gen_logic_CC(tmp);
7dcc1f89 9936 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9937 break;
9938 case 1: /* Sign/zero extend. */
62b44f05
AR
9939 op = (insn >> 20) & 7;
9940 switch (op) {
9941 case 0: /* SXTAH, SXTH */
9942 case 1: /* UXTAH, UXTH */
9943 case 4: /* SXTAB, SXTB */
9944 case 5: /* UXTAB, UXTB */
9945 break;
9946 case 2: /* SXTAB16, SXTB16 */
9947 case 3: /* UXTAB16, UXTB16 */
9948 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9949 goto illegal_op;
9950 }
9951 break;
9952 default:
9953 goto illegal_op;
9954 }
9955 if (rn != 15) {
9956 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9957 goto illegal_op;
9958 }
9959 }
5e3f878a 9960 tmp = load_reg(s, rm);
9ee6e8bb 9961 shift = (insn >> 4) & 3;
1301f322 9962 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9963 rotate, a shift is sufficient. */
9964 if (shift != 0)
f669df27 9965 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9966 op = (insn >> 20) & 7;
9967 switch (op) {
5e3f878a
PB
9968 case 0: gen_sxth(tmp); break;
9969 case 1: gen_uxth(tmp); break;
9970 case 2: gen_sxtb16(tmp); break;
9971 case 3: gen_uxtb16(tmp); break;
9972 case 4: gen_sxtb(tmp); break;
9973 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9974 default:
9975 g_assert_not_reached();
9ee6e8bb
PB
9976 }
9977 if (rn != 15) {
5e3f878a 9978 tmp2 = load_reg(s, rn);
9ee6e8bb 9979 if ((op >> 1) == 1) {
5e3f878a 9980 gen_add16(tmp, tmp2);
9ee6e8bb 9981 } else {
5e3f878a 9982 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9983 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9984 }
9985 }
5e3f878a 9986 store_reg(s, rd, tmp);
9ee6e8bb
PB
9987 break;
9988 case 2: /* SIMD add/subtract. */
62b44f05
AR
9989 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9990 goto illegal_op;
9991 }
9ee6e8bb
PB
9992 op = (insn >> 20) & 7;
9993 shift = (insn >> 4) & 7;
9994 if ((op & 3) == 3 || (shift & 3) == 3)
9995 goto illegal_op;
6ddbc6e4
PB
9996 tmp = load_reg(s, rn);
9997 tmp2 = load_reg(s, rm);
9998 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9999 tcg_temp_free_i32(tmp2);
6ddbc6e4 10000 store_reg(s, rd, tmp);
9ee6e8bb
PB
10001 break;
10002 case 3: /* Other data processing. */
10003 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10004 if (op < 4) {
10005 /* Saturating add/subtract. */
62b44f05
AR
10006 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10007 goto illegal_op;
10008 }
d9ba4830
PB
10009 tmp = load_reg(s, rn);
10010 tmp2 = load_reg(s, rm);
9ee6e8bb 10011 if (op & 1)
9ef39277 10012 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10013 if (op & 2)
9ef39277 10014 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10015 else
9ef39277 10016 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10017 tcg_temp_free_i32(tmp2);
9ee6e8bb 10018 } else {
62b44f05
AR
10019 switch (op) {
10020 case 0x0a: /* rbit */
10021 case 0x08: /* rev */
10022 case 0x09: /* rev16 */
10023 case 0x0b: /* revsh */
10024 case 0x18: /* clz */
10025 break;
10026 case 0x10: /* sel */
10027 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10028 goto illegal_op;
10029 }
10030 break;
10031 case 0x20: /* crc32/crc32c */
10032 case 0x21:
10033 case 0x22:
10034 case 0x28:
10035 case 0x29:
10036 case 0x2a:
10037 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10038 goto illegal_op;
10039 }
10040 break;
10041 default:
10042 goto illegal_op;
10043 }
d9ba4830 10044 tmp = load_reg(s, rn);
9ee6e8bb
PB
10045 switch (op) {
10046 case 0x0a: /* rbit */
d9ba4830 10047 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10048 break;
10049 case 0x08: /* rev */
66896cb8 10050 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10051 break;
10052 case 0x09: /* rev16 */
d9ba4830 10053 gen_rev16(tmp);
9ee6e8bb
PB
10054 break;
10055 case 0x0b: /* revsh */
d9ba4830 10056 gen_revsh(tmp);
9ee6e8bb
PB
10057 break;
10058 case 0x10: /* sel */
d9ba4830 10059 tmp2 = load_reg(s, rm);
7d1b0095 10060 tmp3 = tcg_temp_new_i32();
0ecb72a5 10061 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10062 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10063 tcg_temp_free_i32(tmp3);
10064 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10065 break;
10066 case 0x18: /* clz */
d9ba4830 10067 gen_helper_clz(tmp, tmp);
9ee6e8bb 10068 break;
eb0ecd5a
WN
10069 case 0x20:
10070 case 0x21:
10071 case 0x22:
10072 case 0x28:
10073 case 0x29:
10074 case 0x2a:
10075 {
10076 /* crc32/crc32c */
10077 uint32_t sz = op & 0x3;
10078 uint32_t c = op & 0x8;
10079
eb0ecd5a 10080 tmp2 = load_reg(s, rm);
aa633469
PM
10081 if (sz == 0) {
10082 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10083 } else if (sz == 1) {
10084 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10085 }
eb0ecd5a
WN
10086 tmp3 = tcg_const_i32(1 << sz);
10087 if (c) {
10088 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10089 } else {
10090 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10091 }
10092 tcg_temp_free_i32(tmp2);
10093 tcg_temp_free_i32(tmp3);
10094 break;
10095 }
9ee6e8bb 10096 default:
62b44f05 10097 g_assert_not_reached();
9ee6e8bb
PB
10098 }
10099 }
d9ba4830 10100 store_reg(s, rd, tmp);
9ee6e8bb
PB
10101 break;
10102 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10103 switch ((insn >> 20) & 7) {
10104 case 0: /* 32 x 32 -> 32 */
10105 case 7: /* Unsigned sum of absolute differences. */
10106 break;
10107 case 1: /* 16 x 16 -> 32 */
10108 case 2: /* Dual multiply add. */
10109 case 3: /* 32 * 16 -> 32msb */
10110 case 4: /* Dual multiply subtract. */
10111 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10112 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10113 goto illegal_op;
10114 }
10115 break;
10116 }
9ee6e8bb 10117 op = (insn >> 4) & 0xf;
d9ba4830
PB
10118 tmp = load_reg(s, rn);
10119 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10120 switch ((insn >> 20) & 7) {
10121 case 0: /* 32 x 32 -> 32 */
d9ba4830 10122 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10123 tcg_temp_free_i32(tmp2);
9ee6e8bb 10124 if (rs != 15) {
d9ba4830 10125 tmp2 = load_reg(s, rs);
9ee6e8bb 10126 if (op)
d9ba4830 10127 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10128 else
d9ba4830 10129 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10130 tcg_temp_free_i32(tmp2);
9ee6e8bb 10131 }
9ee6e8bb
PB
10132 break;
10133 case 1: /* 16 x 16 -> 32 */
d9ba4830 10134 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10135 tcg_temp_free_i32(tmp2);
9ee6e8bb 10136 if (rs != 15) {
d9ba4830 10137 tmp2 = load_reg(s, rs);
9ef39277 10138 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10139 tcg_temp_free_i32(tmp2);
9ee6e8bb 10140 }
9ee6e8bb
PB
10141 break;
10142 case 2: /* Dual multiply add. */
10143 case 4: /* Dual multiply subtract. */
10144 if (op)
d9ba4830
PB
10145 gen_swap_half(tmp2);
10146 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10147 if (insn & (1 << 22)) {
e1d177b9 10148 /* This subtraction cannot overflow. */
d9ba4830 10149 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10150 } else {
e1d177b9
PM
10151 /* This addition cannot overflow 32 bits;
10152 * however it may overflow considered as a signed
10153 * operation, in which case we must set the Q flag.
10154 */
9ef39277 10155 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10156 }
7d1b0095 10157 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10158 if (rs != 15)
10159 {
d9ba4830 10160 tmp2 = load_reg(s, rs);
9ef39277 10161 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10162 tcg_temp_free_i32(tmp2);
9ee6e8bb 10163 }
9ee6e8bb
PB
10164 break;
10165 case 3: /* 32 * 16 -> 32msb */
10166 if (op)
d9ba4830 10167 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10168 else
d9ba4830 10169 gen_sxth(tmp2);
a7812ae4
PB
10170 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10171 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10172 tmp = tcg_temp_new_i32();
ecc7b3aa 10173 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10174 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10175 if (rs != 15)
10176 {
d9ba4830 10177 tmp2 = load_reg(s, rs);
9ef39277 10178 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10179 tcg_temp_free_i32(tmp2);
9ee6e8bb 10180 }
9ee6e8bb 10181 break;
838fa72d
AJ
10182 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10183 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10184 if (rs != 15) {
838fa72d
AJ
10185 tmp = load_reg(s, rs);
10186 if (insn & (1 << 20)) {
10187 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10188 } else {
838fa72d 10189 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10190 }
2c0262af 10191 }
838fa72d
AJ
10192 if (insn & (1 << 4)) {
10193 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10194 }
10195 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10196 tmp = tcg_temp_new_i32();
ecc7b3aa 10197 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10198 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10199 break;
10200 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10201 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10202 tcg_temp_free_i32(tmp2);
9ee6e8bb 10203 if (rs != 15) {
d9ba4830
PB
10204 tmp2 = load_reg(s, rs);
10205 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10206 tcg_temp_free_i32(tmp2);
5fd46862 10207 }
9ee6e8bb 10208 break;
2c0262af 10209 }
d9ba4830 10210 store_reg(s, rd, tmp);
2c0262af 10211 break;
9ee6e8bb
PB
10212 case 6: case 7: /* 64-bit multiply, Divide. */
10213 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10214 tmp = load_reg(s, rn);
10215 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10216 if ((op & 0x50) == 0x10) {
10217 /* sdiv, udiv */
d614a513 10218 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10219 goto illegal_op;
47789990 10220 }
9ee6e8bb 10221 if (op & 0x20)
5e3f878a 10222 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10223 else
5e3f878a 10224 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10225 tcg_temp_free_i32(tmp2);
5e3f878a 10226 store_reg(s, rd, tmp);
9ee6e8bb
PB
10227 } else if ((op & 0xe) == 0xc) {
10228 /* Dual multiply accumulate long. */
62b44f05
AR
10229 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10230 tcg_temp_free_i32(tmp);
10231 tcg_temp_free_i32(tmp2);
10232 goto illegal_op;
10233 }
9ee6e8bb 10234 if (op & 1)
5e3f878a
PB
10235 gen_swap_half(tmp2);
10236 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10237 if (op & 0x10) {
5e3f878a 10238 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10239 } else {
5e3f878a 10240 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10241 }
7d1b0095 10242 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10243 /* BUGFIX */
10244 tmp64 = tcg_temp_new_i64();
10245 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10246 tcg_temp_free_i32(tmp);
a7812ae4
PB
10247 gen_addq(s, tmp64, rs, rd);
10248 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10249 tcg_temp_free_i64(tmp64);
2c0262af 10250 } else {
9ee6e8bb
PB
10251 if (op & 0x20) {
10252 /* Unsigned 64-bit multiply */
a7812ae4 10253 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10254 } else {
9ee6e8bb
PB
10255 if (op & 8) {
10256 /* smlalxy */
62b44f05
AR
10257 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10258 tcg_temp_free_i32(tmp2);
10259 tcg_temp_free_i32(tmp);
10260 goto illegal_op;
10261 }
5e3f878a 10262 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10263 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10264 tmp64 = tcg_temp_new_i64();
10265 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10266 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10267 } else {
10268 /* Signed 64-bit multiply */
a7812ae4 10269 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10270 }
b5ff1b31 10271 }
9ee6e8bb
PB
10272 if (op & 4) {
10273 /* umaal */
62b44f05
AR
10274 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10275 tcg_temp_free_i64(tmp64);
10276 goto illegal_op;
10277 }
a7812ae4
PB
10278 gen_addq_lo(s, tmp64, rs);
10279 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10280 } else if (op & 0x40) {
10281 /* 64-bit accumulate. */
a7812ae4 10282 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10283 }
a7812ae4 10284 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10285 tcg_temp_free_i64(tmp64);
5fd46862 10286 }
2c0262af 10287 break;
9ee6e8bb
PB
10288 }
10289 break;
10290 case 6: case 7: case 14: case 15:
10291 /* Coprocessor. */
10292 if (((insn >> 24) & 3) == 3) {
10293 /* Translate into the equivalent ARM encoding. */
f06053e3 10294 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10295 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10296 goto illegal_op;
7dcc1f89 10297 }
6a57f3eb 10298 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10299 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10300 goto illegal_op;
10301 }
9ee6e8bb
PB
10302 } else {
10303 if (insn & (1 << 28))
10304 goto illegal_op;
7dcc1f89 10305 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10306 goto illegal_op;
7dcc1f89 10307 }
9ee6e8bb
PB
10308 }
10309 break;
10310 case 8: case 9: case 10: case 11:
10311 if (insn & (1 << 15)) {
10312 /* Branches, misc control. */
10313 if (insn & 0x5000) {
10314 /* Unconditional branch. */
10315 /* signextend(hw1[10:0]) -> offset[:12]. */
10316 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10317 /* hw1[10:0] -> offset[11:1]. */
10318 offset |= (insn & 0x7ff) << 1;
10319 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10320 offset[24:22] already have the same value because of the
10321 sign extension above. */
10322 offset ^= ((~insn) & (1 << 13)) << 10;
10323 offset ^= ((~insn) & (1 << 11)) << 11;
10324
9ee6e8bb
PB
10325 if (insn & (1 << 14)) {
10326 /* Branch and link. */
3174f8e9 10327 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10328 }
3b46e624 10329
b0109805 10330 offset += s->pc;
9ee6e8bb
PB
10331 if (insn & (1 << 12)) {
10332 /* b/bl */
b0109805 10333 gen_jmp(s, offset);
9ee6e8bb
PB
10334 } else {
10335 /* blx */
b0109805 10336 offset &= ~(uint32_t)2;
be5e7a76 10337 /* thumb2 bx, no need to check */
b0109805 10338 gen_bx_im(s, offset);
2c0262af 10339 }
9ee6e8bb
PB
10340 } else if (((insn >> 23) & 7) == 7) {
10341 /* Misc control */
10342 if (insn & (1 << 13))
10343 goto illegal_op;
10344
10345 if (insn & (1 << 26)) {
37e6456e
PM
10346 if (!(insn & (1 << 20))) {
10347 /* Hypervisor call (v7) */
10348 int imm16 = extract32(insn, 16, 4) << 12
10349 | extract32(insn, 0, 12);
10350 ARCH(7);
10351 if (IS_USER(s)) {
10352 goto illegal_op;
10353 }
10354 gen_hvc(s, imm16);
10355 } else {
10356 /* Secure monitor call (v6+) */
10357 ARCH(6K);
10358 if (IS_USER(s)) {
10359 goto illegal_op;
10360 }
10361 gen_smc(s);
10362 }
2c0262af 10363 } else {
9ee6e8bb
PB
10364 op = (insn >> 20) & 7;
10365 switch (op) {
10366 case 0: /* msr cpsr. */
b53d8923 10367 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10368 tmp = load_reg(s, rn);
10369 addr = tcg_const_i32(insn & 0xff);
10370 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10371 tcg_temp_free_i32(addr);
7d1b0095 10372 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10373 gen_lookup_tb(s);
10374 break;
10375 }
10376 /* fall through */
10377 case 1: /* msr spsr. */
b53d8923 10378 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10379 goto illegal_op;
b53d8923 10380 }
8bfd0550
PM
10381
10382 if (extract32(insn, 5, 1)) {
10383 /* MSR (banked) */
10384 int sysm = extract32(insn, 8, 4) |
10385 (extract32(insn, 4, 1) << 4);
10386 int r = op & 1;
10387
10388 gen_msr_banked(s, r, sysm, rm);
10389 break;
10390 }
10391
10392 /* MSR (for PSRs) */
2fbac54b
FN
10393 tmp = load_reg(s, rn);
10394 if (gen_set_psr(s,
7dcc1f89 10395 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10396 op == 1, tmp))
9ee6e8bb
PB
10397 goto illegal_op;
10398 break;
10399 case 2: /* cps, nop-hint. */
10400 if (((insn >> 8) & 7) == 0) {
10401 gen_nop_hint(s, insn & 0xff);
10402 }
10403 /* Implemented as NOP in user mode. */
10404 if (IS_USER(s))
10405 break;
10406 offset = 0;
10407 imm = 0;
10408 if (insn & (1 << 10)) {
10409 if (insn & (1 << 7))
10410 offset |= CPSR_A;
10411 if (insn & (1 << 6))
10412 offset |= CPSR_I;
10413 if (insn & (1 << 5))
10414 offset |= CPSR_F;
10415 if (insn & (1 << 9))
10416 imm = CPSR_A | CPSR_I | CPSR_F;
10417 }
10418 if (insn & (1 << 8)) {
10419 offset |= 0x1f;
10420 imm |= (insn & 0x1f);
10421 }
10422 if (offset) {
2fbac54b 10423 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10424 }
10425 break;
10426 case 3: /* Special control operations. */
426f5abc 10427 ARCH(7);
9ee6e8bb
PB
10428 op = (insn >> 4) & 0xf;
10429 switch (op) {
10430 case 2: /* clrex */
426f5abc 10431 gen_clrex(s);
9ee6e8bb
PB
10432 break;
10433 case 4: /* dsb */
10434 case 5: /* dmb */
9ee6e8bb 10435 /* These execute as NOPs. */
9ee6e8bb 10436 break;
6df99dec
SS
10437 case 6: /* isb */
10438 /* We need to break the TB after this insn
10439 * to execute self-modifying code correctly
10440 * and also to take any pending interrupts
10441 * immediately.
10442 */
10443 gen_lookup_tb(s);
10444 break;
9ee6e8bb
PB
10445 default:
10446 goto illegal_op;
10447 }
10448 break;
10449 case 4: /* bxj */
10450 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10451 tmp = load_reg(s, rn);
10452 gen_bx(s, tmp);
9ee6e8bb
PB
10453 break;
10454 case 5: /* Exception return. */
b8b45b68
RV
10455 if (IS_USER(s)) {
10456 goto illegal_op;
10457 }
10458 if (rn != 14 || rd != 15) {
10459 goto illegal_op;
10460 }
10461 tmp = load_reg(s, rn);
10462 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10463 gen_exception_return(s, tmp);
10464 break;
8bfd0550
PM
10465 case 6: /* MRS */
10466 if (extract32(insn, 5, 1)) {
10467 /* MRS (banked) */
10468 int sysm = extract32(insn, 16, 4) |
10469 (extract32(insn, 4, 1) << 4);
10470
10471 gen_mrs_banked(s, 0, sysm, rd);
10472 break;
10473 }
10474
10475 /* mrs cpsr */
7d1b0095 10476 tmp = tcg_temp_new_i32();
b53d8923 10477 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10478 addr = tcg_const_i32(insn & 0xff);
10479 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10480 tcg_temp_free_i32(addr);
9ee6e8bb 10481 } else {
9ef39277 10482 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10483 }
8984bd2e 10484 store_reg(s, rd, tmp);
9ee6e8bb 10485 break;
8bfd0550
PM
10486 case 7: /* MRS */
10487 if (extract32(insn, 5, 1)) {
10488 /* MRS (banked) */
10489 int sysm = extract32(insn, 16, 4) |
10490 (extract32(insn, 4, 1) << 4);
10491
10492 gen_mrs_banked(s, 1, sysm, rd);
10493 break;
10494 }
10495
10496 /* mrs spsr. */
9ee6e8bb 10497 /* Not accessible in user mode. */
b53d8923 10498 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10499 goto illegal_op;
b53d8923 10500 }
d9ba4830
PB
10501 tmp = load_cpu_field(spsr);
10502 store_reg(s, rd, tmp);
9ee6e8bb 10503 break;
2c0262af
FB
10504 }
10505 }
9ee6e8bb
PB
10506 } else {
10507 /* Conditional branch. */
10508 op = (insn >> 22) & 0xf;
10509 /* Generate a conditional jump to next instruction. */
10510 s->condlabel = gen_new_label();
39fb730a 10511 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10512 s->condjmp = 1;
10513
10514 /* offset[11:1] = insn[10:0] */
10515 offset = (insn & 0x7ff) << 1;
10516 /* offset[17:12] = insn[21:16]. */
10517 offset |= (insn & 0x003f0000) >> 4;
10518 /* offset[31:20] = insn[26]. */
10519 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10520 /* offset[18] = insn[13]. */
10521 offset |= (insn & (1 << 13)) << 5;
10522 /* offset[19] = insn[11]. */
10523 offset |= (insn & (1 << 11)) << 8;
10524
10525 /* jump to the offset */
b0109805 10526 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10527 }
10528 } else {
10529 /* Data processing immediate. */
10530 if (insn & (1 << 25)) {
10531 if (insn & (1 << 24)) {
10532 if (insn & (1 << 20))
10533 goto illegal_op;
10534 /* Bitfield/Saturate. */
10535 op = (insn >> 21) & 7;
10536 imm = insn & 0x1f;
10537 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10538 if (rn == 15) {
7d1b0095 10539 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10540 tcg_gen_movi_i32(tmp, 0);
10541 } else {
10542 tmp = load_reg(s, rn);
10543 }
9ee6e8bb
PB
10544 switch (op) {
10545 case 2: /* Signed bitfield extract. */
10546 imm++;
10547 if (shift + imm > 32)
10548 goto illegal_op;
10549 if (imm < 32)
6ddbc6e4 10550 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
10551 break;
10552 case 6: /* Unsigned bitfield extract. */
10553 imm++;
10554 if (shift + imm > 32)
10555 goto illegal_op;
10556 if (imm < 32)
6ddbc6e4 10557 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
10558 break;
10559 case 3: /* Bitfield insert/clear. */
10560 if (imm < shift)
10561 goto illegal_op;
10562 imm = imm + 1 - shift;
10563 if (imm != 32) {
6ddbc6e4 10564 tmp2 = load_reg(s, rd);
d593c48e 10565 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10566 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10567 }
10568 break;
10569 case 7:
10570 goto illegal_op;
10571 default: /* Saturate. */
9ee6e8bb
PB
10572 if (shift) {
10573 if (op & 1)
6ddbc6e4 10574 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10575 else
6ddbc6e4 10576 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10577 }
6ddbc6e4 10578 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10579 if (op & 4) {
10580 /* Unsigned. */
62b44f05
AR
10581 if ((op & 1) && shift == 0) {
10582 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10583 tcg_temp_free_i32(tmp);
10584 tcg_temp_free_i32(tmp2);
10585 goto illegal_op;
10586 }
9ef39277 10587 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10588 } else {
9ef39277 10589 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10590 }
2c0262af 10591 } else {
9ee6e8bb 10592 /* Signed. */
62b44f05
AR
10593 if ((op & 1) && shift == 0) {
10594 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10595 tcg_temp_free_i32(tmp);
10596 tcg_temp_free_i32(tmp2);
10597 goto illegal_op;
10598 }
9ef39277 10599 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10600 } else {
9ef39277 10601 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10602 }
2c0262af 10603 }
b75263d6 10604 tcg_temp_free_i32(tmp2);
9ee6e8bb 10605 break;
2c0262af 10606 }
6ddbc6e4 10607 store_reg(s, rd, tmp);
9ee6e8bb
PB
10608 } else {
10609 imm = ((insn & 0x04000000) >> 15)
10610 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10611 if (insn & (1 << 22)) {
10612 /* 16-bit immediate. */
10613 imm |= (insn >> 4) & 0xf000;
10614 if (insn & (1 << 23)) {
10615 /* movt */
5e3f878a 10616 tmp = load_reg(s, rd);
86831435 10617 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10618 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10619 } else {
9ee6e8bb 10620 /* movw */
7d1b0095 10621 tmp = tcg_temp_new_i32();
5e3f878a 10622 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10623 }
10624 } else {
9ee6e8bb
PB
10625 /* Add/sub 12-bit immediate. */
10626 if (rn == 15) {
b0109805 10627 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10628 if (insn & (1 << 23))
b0109805 10629 offset -= imm;
9ee6e8bb 10630 else
b0109805 10631 offset += imm;
7d1b0095 10632 tmp = tcg_temp_new_i32();
5e3f878a 10633 tcg_gen_movi_i32(tmp, offset);
2c0262af 10634 } else {
5e3f878a 10635 tmp = load_reg(s, rn);
9ee6e8bb 10636 if (insn & (1 << 23))
5e3f878a 10637 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10638 else
5e3f878a 10639 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10640 }
9ee6e8bb 10641 }
5e3f878a 10642 store_reg(s, rd, tmp);
191abaa2 10643 }
9ee6e8bb
PB
10644 } else {
10645 int shifter_out = 0;
10646 /* modified 12-bit immediate. */
10647 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10648 imm = (insn & 0xff);
10649 switch (shift) {
10650 case 0: /* XY */
10651 /* Nothing to do. */
10652 break;
10653 case 1: /* 00XY00XY */
10654 imm |= imm << 16;
10655 break;
10656 case 2: /* XY00XY00 */
10657 imm |= imm << 16;
10658 imm <<= 8;
10659 break;
10660 case 3: /* XYXYXYXY */
10661 imm |= imm << 16;
10662 imm |= imm << 8;
10663 break;
10664 default: /* Rotated constant. */
10665 shift = (shift << 1) | (imm >> 7);
10666 imm |= 0x80;
10667 imm = imm << (32 - shift);
10668 shifter_out = 1;
10669 break;
b5ff1b31 10670 }
7d1b0095 10671 tmp2 = tcg_temp_new_i32();
3174f8e9 10672 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10673 rn = (insn >> 16) & 0xf;
3174f8e9 10674 if (rn == 15) {
7d1b0095 10675 tmp = tcg_temp_new_i32();
3174f8e9
FN
10676 tcg_gen_movi_i32(tmp, 0);
10677 } else {
10678 tmp = load_reg(s, rn);
10679 }
9ee6e8bb
PB
10680 op = (insn >> 21) & 0xf;
10681 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10682 shifter_out, tmp, tmp2))
9ee6e8bb 10683 goto illegal_op;
7d1b0095 10684 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10685 rd = (insn >> 8) & 0xf;
10686 if (rd != 15) {
3174f8e9
FN
10687 store_reg(s, rd, tmp);
10688 } else {
7d1b0095 10689 tcg_temp_free_i32(tmp);
2c0262af 10690 }
2c0262af 10691 }
9ee6e8bb
PB
10692 }
10693 break;
10694 case 12: /* Load/store single data item. */
10695 {
10696 int postinc = 0;
10697 int writeback = 0;
a99caa48 10698 int memidx;
9ee6e8bb 10699 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10700 if (disas_neon_ls_insn(s, insn)) {
c1713132 10701 goto illegal_op;
7dcc1f89 10702 }
9ee6e8bb
PB
10703 break;
10704 }
a2fdc890
PM
10705 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10706 if (rs == 15) {
10707 if (!(insn & (1 << 20))) {
10708 goto illegal_op;
10709 }
10710 if (op != 2) {
10711 /* Byte or halfword load space with dest == r15 : memory hints.
10712 * Catch them early so we don't emit pointless addressing code.
10713 * This space is a mix of:
10714 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10715 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10716 * cores)
10717 * unallocated hints, which must be treated as NOPs
10718 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10719 * which is easiest for the decoding logic
10720 * Some space which must UNDEF
10721 */
10722 int op1 = (insn >> 23) & 3;
10723 int op2 = (insn >> 6) & 0x3f;
10724 if (op & 2) {
10725 goto illegal_op;
10726 }
10727 if (rn == 15) {
02afbf64
PM
10728 /* UNPREDICTABLE, unallocated hint or
10729 * PLD/PLDW/PLI (literal)
10730 */
a2fdc890
PM
10731 return 0;
10732 }
10733 if (op1 & 1) {
02afbf64 10734 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10735 }
10736 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10737 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10738 }
10739 /* UNDEF space, or an UNPREDICTABLE */
10740 return 1;
10741 }
10742 }
a99caa48 10743 memidx = get_mem_index(s);
9ee6e8bb 10744 if (rn == 15) {
7d1b0095 10745 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10746 /* PC relative. */
10747 /* s->pc has already been incremented by 4. */
10748 imm = s->pc & 0xfffffffc;
10749 if (insn & (1 << 23))
10750 imm += insn & 0xfff;
10751 else
10752 imm -= insn & 0xfff;
b0109805 10753 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10754 } else {
b0109805 10755 addr = load_reg(s, rn);
9ee6e8bb
PB
10756 if (insn & (1 << 23)) {
10757 /* Positive offset. */
10758 imm = insn & 0xfff;
b0109805 10759 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10760 } else {
9ee6e8bb 10761 imm = insn & 0xff;
2a0308c5
PM
10762 switch ((insn >> 8) & 0xf) {
10763 case 0x0: /* Shifted Register. */
9ee6e8bb 10764 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10765 if (shift > 3) {
10766 tcg_temp_free_i32(addr);
18c9b560 10767 goto illegal_op;
2a0308c5 10768 }
b26eefb6 10769 tmp = load_reg(s, rm);
9ee6e8bb 10770 if (shift)
b26eefb6 10771 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10772 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10773 tcg_temp_free_i32(tmp);
9ee6e8bb 10774 break;
2a0308c5 10775 case 0xc: /* Negative offset. */
b0109805 10776 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10777 break;
2a0308c5 10778 case 0xe: /* User privilege. */
b0109805 10779 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10780 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10781 break;
2a0308c5 10782 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10783 imm = -imm;
10784 /* Fall through. */
2a0308c5 10785 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10786 postinc = 1;
10787 writeback = 1;
10788 break;
2a0308c5 10789 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10790 imm = -imm;
10791 /* Fall through. */
2a0308c5 10792 case 0xf: /* Pre-increment. */
b0109805 10793 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10794 writeback = 1;
10795 break;
10796 default:
2a0308c5 10797 tcg_temp_free_i32(addr);
b7bcbe95 10798 goto illegal_op;
9ee6e8bb
PB
10799 }
10800 }
10801 }
9ee6e8bb
PB
10802 if (insn & (1 << 20)) {
10803 /* Load. */
5a839c0d 10804 tmp = tcg_temp_new_i32();
a2fdc890 10805 switch (op) {
5a839c0d 10806 case 0:
12dcc321 10807 gen_aa32_ld8u(s, tmp, addr, memidx);
5a839c0d
PM
10808 break;
10809 case 4:
12dcc321 10810 gen_aa32_ld8s(s, tmp, addr, memidx);
5a839c0d
PM
10811 break;
10812 case 1:
12dcc321 10813 gen_aa32_ld16u(s, tmp, addr, memidx);
5a839c0d
PM
10814 break;
10815 case 5:
12dcc321 10816 gen_aa32_ld16s(s, tmp, addr, memidx);
5a839c0d
PM
10817 break;
10818 case 2:
12dcc321 10819 gen_aa32_ld32u(s, tmp, addr, memidx);
5a839c0d 10820 break;
2a0308c5 10821 default:
5a839c0d 10822 tcg_temp_free_i32(tmp);
2a0308c5
PM
10823 tcg_temp_free_i32(addr);
10824 goto illegal_op;
a2fdc890
PM
10825 }
10826 if (rs == 15) {
10827 gen_bx(s, tmp);
9ee6e8bb 10828 } else {
a2fdc890 10829 store_reg(s, rs, tmp);
9ee6e8bb
PB
10830 }
10831 } else {
10832 /* Store. */
b0109805 10833 tmp = load_reg(s, rs);
9ee6e8bb 10834 switch (op) {
5a839c0d 10835 case 0:
12dcc321 10836 gen_aa32_st8(s, tmp, addr, memidx);
5a839c0d
PM
10837 break;
10838 case 1:
12dcc321 10839 gen_aa32_st16(s, tmp, addr, memidx);
5a839c0d
PM
10840 break;
10841 case 2:
12dcc321 10842 gen_aa32_st32(s, tmp, addr, memidx);
5a839c0d 10843 break;
2a0308c5 10844 default:
5a839c0d 10845 tcg_temp_free_i32(tmp);
2a0308c5
PM
10846 tcg_temp_free_i32(addr);
10847 goto illegal_op;
b7bcbe95 10848 }
5a839c0d 10849 tcg_temp_free_i32(tmp);
2c0262af 10850 }
9ee6e8bb 10851 if (postinc)
b0109805
PB
10852 tcg_gen_addi_i32(addr, addr, imm);
10853 if (writeback) {
10854 store_reg(s, rn, addr);
10855 } else {
7d1b0095 10856 tcg_temp_free_i32(addr);
b0109805 10857 }
9ee6e8bb
PB
10858 }
10859 break;
10860 default:
10861 goto illegal_op;
2c0262af 10862 }
9ee6e8bb
PB
10863 return 0;
10864illegal_op:
10865 return 1;
2c0262af
FB
10866}
10867
0ecb72a5 10868static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10869{
10870 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10871 int32_t offset;
10872 int i;
39d5492a
PM
10873 TCGv_i32 tmp;
10874 TCGv_i32 tmp2;
10875 TCGv_i32 addr;
99c475ab 10876
9ee6e8bb
PB
10877 if (s->condexec_mask) {
10878 cond = s->condexec_cond;
bedd2912
JB
10879 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10880 s->condlabel = gen_new_label();
39fb730a 10881 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10882 s->condjmp = 1;
10883 }
9ee6e8bb
PB
10884 }
10885
f9fd40eb 10886 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 10887 s->pc += 2;
b5ff1b31 10888
99c475ab
FB
10889 switch (insn >> 12) {
10890 case 0: case 1:
396e467c 10891
99c475ab
FB
10892 rd = insn & 7;
10893 op = (insn >> 11) & 3;
10894 if (op == 3) {
10895 /* add/subtract */
10896 rn = (insn >> 3) & 7;
396e467c 10897 tmp = load_reg(s, rn);
99c475ab
FB
10898 if (insn & (1 << 10)) {
10899 /* immediate */
7d1b0095 10900 tmp2 = tcg_temp_new_i32();
396e467c 10901 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10902 } else {
10903 /* reg */
10904 rm = (insn >> 6) & 7;
396e467c 10905 tmp2 = load_reg(s, rm);
99c475ab 10906 }
9ee6e8bb
PB
10907 if (insn & (1 << 9)) {
10908 if (s->condexec_mask)
396e467c 10909 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10910 else
72485ec4 10911 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10912 } else {
10913 if (s->condexec_mask)
396e467c 10914 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10915 else
72485ec4 10916 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10917 }
7d1b0095 10918 tcg_temp_free_i32(tmp2);
396e467c 10919 store_reg(s, rd, tmp);
99c475ab
FB
10920 } else {
10921 /* shift immediate */
10922 rm = (insn >> 3) & 7;
10923 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10924 tmp = load_reg(s, rm);
10925 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10926 if (!s->condexec_mask)
10927 gen_logic_CC(tmp);
10928 store_reg(s, rd, tmp);
99c475ab
FB
10929 }
10930 break;
10931 case 2: case 3:
10932 /* arithmetic large immediate */
10933 op = (insn >> 11) & 3;
10934 rd = (insn >> 8) & 0x7;
396e467c 10935 if (op == 0) { /* mov */
7d1b0095 10936 tmp = tcg_temp_new_i32();
396e467c 10937 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10938 if (!s->condexec_mask)
396e467c
FN
10939 gen_logic_CC(tmp);
10940 store_reg(s, rd, tmp);
10941 } else {
10942 tmp = load_reg(s, rd);
7d1b0095 10943 tmp2 = tcg_temp_new_i32();
396e467c
FN
10944 tcg_gen_movi_i32(tmp2, insn & 0xff);
10945 switch (op) {
10946 case 1: /* cmp */
72485ec4 10947 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10948 tcg_temp_free_i32(tmp);
10949 tcg_temp_free_i32(tmp2);
396e467c
FN
10950 break;
10951 case 2: /* add */
10952 if (s->condexec_mask)
10953 tcg_gen_add_i32(tmp, tmp, tmp2);
10954 else
72485ec4 10955 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10956 tcg_temp_free_i32(tmp2);
396e467c
FN
10957 store_reg(s, rd, tmp);
10958 break;
10959 case 3: /* sub */
10960 if (s->condexec_mask)
10961 tcg_gen_sub_i32(tmp, tmp, tmp2);
10962 else
72485ec4 10963 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10964 tcg_temp_free_i32(tmp2);
396e467c
FN
10965 store_reg(s, rd, tmp);
10966 break;
10967 }
99c475ab 10968 }
99c475ab
FB
10969 break;
10970 case 4:
10971 if (insn & (1 << 11)) {
10972 rd = (insn >> 8) & 7;
5899f386
FB
10973 /* load pc-relative. Bit 1 of PC is ignored. */
10974 val = s->pc + 2 + ((insn & 0xff) * 4);
10975 val &= ~(uint32_t)2;
7d1b0095 10976 addr = tcg_temp_new_i32();
b0109805 10977 tcg_gen_movi_i32(addr, val);
c40c8556 10978 tmp = tcg_temp_new_i32();
12dcc321 10979 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
7d1b0095 10980 tcg_temp_free_i32(addr);
b0109805 10981 store_reg(s, rd, tmp);
99c475ab
FB
10982 break;
10983 }
10984 if (insn & (1 << 10)) {
10985 /* data processing extended or blx */
10986 rd = (insn & 7) | ((insn >> 4) & 8);
10987 rm = (insn >> 3) & 0xf;
10988 op = (insn >> 8) & 3;
10989 switch (op) {
10990 case 0: /* add */
396e467c
FN
10991 tmp = load_reg(s, rd);
10992 tmp2 = load_reg(s, rm);
10993 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10994 tcg_temp_free_i32(tmp2);
396e467c 10995 store_reg(s, rd, tmp);
99c475ab
FB
10996 break;
10997 case 1: /* cmp */
396e467c
FN
10998 tmp = load_reg(s, rd);
10999 tmp2 = load_reg(s, rm);
72485ec4 11000 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11001 tcg_temp_free_i32(tmp2);
11002 tcg_temp_free_i32(tmp);
99c475ab
FB
11003 break;
11004 case 2: /* mov/cpy */
396e467c
FN
11005 tmp = load_reg(s, rm);
11006 store_reg(s, rd, tmp);
99c475ab
FB
11007 break;
11008 case 3:/* branch [and link] exchange thumb register */
b0109805 11009 tmp = load_reg(s, rm);
99c475ab 11010 if (insn & (1 << 7)) {
be5e7a76 11011 ARCH(5);
99c475ab 11012 val = (uint32_t)s->pc | 1;
7d1b0095 11013 tmp2 = tcg_temp_new_i32();
b0109805
PB
11014 tcg_gen_movi_i32(tmp2, val);
11015 store_reg(s, 14, tmp2);
99c475ab 11016 }
be5e7a76 11017 /* already thumb, no need to check */
d9ba4830 11018 gen_bx(s, tmp);
99c475ab
FB
11019 break;
11020 }
11021 break;
11022 }
11023
11024 /* data processing register */
11025 rd = insn & 7;
11026 rm = (insn >> 3) & 7;
11027 op = (insn >> 6) & 0xf;
11028 if (op == 2 || op == 3 || op == 4 || op == 7) {
11029 /* the shift/rotate ops want the operands backwards */
11030 val = rm;
11031 rm = rd;
11032 rd = val;
11033 val = 1;
11034 } else {
11035 val = 0;
11036 }
11037
396e467c 11038 if (op == 9) { /* neg */
7d1b0095 11039 tmp = tcg_temp_new_i32();
396e467c
FN
11040 tcg_gen_movi_i32(tmp, 0);
11041 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11042 tmp = load_reg(s, rd);
11043 } else {
39d5492a 11044 TCGV_UNUSED_I32(tmp);
396e467c 11045 }
99c475ab 11046
396e467c 11047 tmp2 = load_reg(s, rm);
5899f386 11048 switch (op) {
99c475ab 11049 case 0x0: /* and */
396e467c 11050 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11051 if (!s->condexec_mask)
396e467c 11052 gen_logic_CC(tmp);
99c475ab
FB
11053 break;
11054 case 0x1: /* eor */
396e467c 11055 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11056 if (!s->condexec_mask)
396e467c 11057 gen_logic_CC(tmp);
99c475ab
FB
11058 break;
11059 case 0x2: /* lsl */
9ee6e8bb 11060 if (s->condexec_mask) {
365af80e 11061 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11062 } else {
9ef39277 11063 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11064 gen_logic_CC(tmp2);
9ee6e8bb 11065 }
99c475ab
FB
11066 break;
11067 case 0x3: /* lsr */
9ee6e8bb 11068 if (s->condexec_mask) {
365af80e 11069 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11070 } else {
9ef39277 11071 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11072 gen_logic_CC(tmp2);
9ee6e8bb 11073 }
99c475ab
FB
11074 break;
11075 case 0x4: /* asr */
9ee6e8bb 11076 if (s->condexec_mask) {
365af80e 11077 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11078 } else {
9ef39277 11079 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11080 gen_logic_CC(tmp2);
9ee6e8bb 11081 }
99c475ab
FB
11082 break;
11083 case 0x5: /* adc */
49b4c31e 11084 if (s->condexec_mask) {
396e467c 11085 gen_adc(tmp, tmp2);
49b4c31e
RH
11086 } else {
11087 gen_adc_CC(tmp, tmp, tmp2);
11088 }
99c475ab
FB
11089 break;
11090 case 0x6: /* sbc */
2de68a49 11091 if (s->condexec_mask) {
396e467c 11092 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11093 } else {
11094 gen_sbc_CC(tmp, tmp, tmp2);
11095 }
99c475ab
FB
11096 break;
11097 case 0x7: /* ror */
9ee6e8bb 11098 if (s->condexec_mask) {
f669df27
AJ
11099 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11100 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11101 } else {
9ef39277 11102 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11103 gen_logic_CC(tmp2);
9ee6e8bb 11104 }
99c475ab
FB
11105 break;
11106 case 0x8: /* tst */
396e467c
FN
11107 tcg_gen_and_i32(tmp, tmp, tmp2);
11108 gen_logic_CC(tmp);
99c475ab 11109 rd = 16;
5899f386 11110 break;
99c475ab 11111 case 0x9: /* neg */
9ee6e8bb 11112 if (s->condexec_mask)
396e467c 11113 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11114 else
72485ec4 11115 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11116 break;
11117 case 0xa: /* cmp */
72485ec4 11118 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11119 rd = 16;
11120 break;
11121 case 0xb: /* cmn */
72485ec4 11122 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11123 rd = 16;
11124 break;
11125 case 0xc: /* orr */
396e467c 11126 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11127 if (!s->condexec_mask)
396e467c 11128 gen_logic_CC(tmp);
99c475ab
FB
11129 break;
11130 case 0xd: /* mul */
7b2919a0 11131 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11132 if (!s->condexec_mask)
396e467c 11133 gen_logic_CC(tmp);
99c475ab
FB
11134 break;
11135 case 0xe: /* bic */
f669df27 11136 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11137 if (!s->condexec_mask)
396e467c 11138 gen_logic_CC(tmp);
99c475ab
FB
11139 break;
11140 case 0xf: /* mvn */
396e467c 11141 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11142 if (!s->condexec_mask)
396e467c 11143 gen_logic_CC(tmp2);
99c475ab 11144 val = 1;
5899f386 11145 rm = rd;
99c475ab
FB
11146 break;
11147 }
11148 if (rd != 16) {
396e467c
FN
11149 if (val) {
11150 store_reg(s, rm, tmp2);
11151 if (op != 0xf)
7d1b0095 11152 tcg_temp_free_i32(tmp);
396e467c
FN
11153 } else {
11154 store_reg(s, rd, tmp);
7d1b0095 11155 tcg_temp_free_i32(tmp2);
396e467c
FN
11156 }
11157 } else {
7d1b0095
PM
11158 tcg_temp_free_i32(tmp);
11159 tcg_temp_free_i32(tmp2);
99c475ab
FB
11160 }
11161 break;
11162
11163 case 5:
11164 /* load/store register offset. */
11165 rd = insn & 7;
11166 rn = (insn >> 3) & 7;
11167 rm = (insn >> 6) & 7;
11168 op = (insn >> 9) & 7;
b0109805 11169 addr = load_reg(s, rn);
b26eefb6 11170 tmp = load_reg(s, rm);
b0109805 11171 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11172 tcg_temp_free_i32(tmp);
99c475ab 11173
c40c8556 11174 if (op < 3) { /* store */
b0109805 11175 tmp = load_reg(s, rd);
c40c8556
PM
11176 } else {
11177 tmp = tcg_temp_new_i32();
11178 }
99c475ab
FB
11179
11180 switch (op) {
11181 case 0: /* str */
12dcc321 11182 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11183 break;
11184 case 1: /* strh */
12dcc321 11185 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11186 break;
11187 case 2: /* strb */
12dcc321 11188 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11189 break;
11190 case 3: /* ldrsb */
12dcc321 11191 gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11192 break;
11193 case 4: /* ldr */
12dcc321 11194 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11195 break;
11196 case 5: /* ldrh */
12dcc321 11197 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11198 break;
11199 case 6: /* ldrb */
12dcc321 11200 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11201 break;
11202 case 7: /* ldrsh */
12dcc321 11203 gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11204 break;
11205 }
c40c8556 11206 if (op >= 3) { /* load */
b0109805 11207 store_reg(s, rd, tmp);
c40c8556
PM
11208 } else {
11209 tcg_temp_free_i32(tmp);
11210 }
7d1b0095 11211 tcg_temp_free_i32(addr);
99c475ab
FB
11212 break;
11213
11214 case 6:
11215 /* load/store word immediate offset */
11216 rd = insn & 7;
11217 rn = (insn >> 3) & 7;
b0109805 11218 addr = load_reg(s, rn);
99c475ab 11219 val = (insn >> 4) & 0x7c;
b0109805 11220 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11221
11222 if (insn & (1 << 11)) {
11223 /* load */
c40c8556 11224 tmp = tcg_temp_new_i32();
12dcc321 11225 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11226 store_reg(s, rd, tmp);
99c475ab
FB
11227 } else {
11228 /* store */
b0109805 11229 tmp = load_reg(s, rd);
12dcc321 11230 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11231 tcg_temp_free_i32(tmp);
99c475ab 11232 }
7d1b0095 11233 tcg_temp_free_i32(addr);
99c475ab
FB
11234 break;
11235
11236 case 7:
11237 /* load/store byte immediate offset */
11238 rd = insn & 7;
11239 rn = (insn >> 3) & 7;
b0109805 11240 addr = load_reg(s, rn);
99c475ab 11241 val = (insn >> 6) & 0x1f;
b0109805 11242 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11243
11244 if (insn & (1 << 11)) {
11245 /* load */
c40c8556 11246 tmp = tcg_temp_new_i32();
12dcc321 11247 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
b0109805 11248 store_reg(s, rd, tmp);
99c475ab
FB
11249 } else {
11250 /* store */
b0109805 11251 tmp = load_reg(s, rd);
12dcc321 11252 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
c40c8556 11253 tcg_temp_free_i32(tmp);
99c475ab 11254 }
7d1b0095 11255 tcg_temp_free_i32(addr);
99c475ab
FB
11256 break;
11257
11258 case 8:
11259 /* load/store halfword immediate offset */
11260 rd = insn & 7;
11261 rn = (insn >> 3) & 7;
b0109805 11262 addr = load_reg(s, rn);
99c475ab 11263 val = (insn >> 5) & 0x3e;
b0109805 11264 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11265
11266 if (insn & (1 << 11)) {
11267 /* load */
c40c8556 11268 tmp = tcg_temp_new_i32();
12dcc321 11269 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
b0109805 11270 store_reg(s, rd, tmp);
99c475ab
FB
11271 } else {
11272 /* store */
b0109805 11273 tmp = load_reg(s, rd);
12dcc321 11274 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
c40c8556 11275 tcg_temp_free_i32(tmp);
99c475ab 11276 }
7d1b0095 11277 tcg_temp_free_i32(addr);
99c475ab
FB
11278 break;
11279
11280 case 9:
11281 /* load/store from stack */
11282 rd = (insn >> 8) & 7;
b0109805 11283 addr = load_reg(s, 13);
99c475ab 11284 val = (insn & 0xff) * 4;
b0109805 11285 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11286
11287 if (insn & (1 << 11)) {
11288 /* load */
c40c8556 11289 tmp = tcg_temp_new_i32();
12dcc321 11290 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11291 store_reg(s, rd, tmp);
99c475ab
FB
11292 } else {
11293 /* store */
b0109805 11294 tmp = load_reg(s, rd);
12dcc321 11295 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11296 tcg_temp_free_i32(tmp);
99c475ab 11297 }
7d1b0095 11298 tcg_temp_free_i32(addr);
99c475ab
FB
11299 break;
11300
11301 case 10:
11302 /* add to high reg */
11303 rd = (insn >> 8) & 7;
5899f386
FB
11304 if (insn & (1 << 11)) {
11305 /* SP */
5e3f878a 11306 tmp = load_reg(s, 13);
5899f386
FB
11307 } else {
11308 /* PC. bit 1 is ignored. */
7d1b0095 11309 tmp = tcg_temp_new_i32();
5e3f878a 11310 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11311 }
99c475ab 11312 val = (insn & 0xff) * 4;
5e3f878a
PB
11313 tcg_gen_addi_i32(tmp, tmp, val);
11314 store_reg(s, rd, tmp);
99c475ab
FB
11315 break;
11316
11317 case 11:
11318 /* misc */
11319 op = (insn >> 8) & 0xf;
11320 switch (op) {
11321 case 0:
11322 /* adjust stack pointer */
b26eefb6 11323 tmp = load_reg(s, 13);
99c475ab
FB
11324 val = (insn & 0x7f) * 4;
11325 if (insn & (1 << 7))
6a0d8a1d 11326 val = -(int32_t)val;
b26eefb6
PB
11327 tcg_gen_addi_i32(tmp, tmp, val);
11328 store_reg(s, 13, tmp);
99c475ab
FB
11329 break;
11330
9ee6e8bb
PB
11331 case 2: /* sign/zero extend. */
11332 ARCH(6);
11333 rd = insn & 7;
11334 rm = (insn >> 3) & 7;
b0109805 11335 tmp = load_reg(s, rm);
9ee6e8bb 11336 switch ((insn >> 6) & 3) {
b0109805
PB
11337 case 0: gen_sxth(tmp); break;
11338 case 1: gen_sxtb(tmp); break;
11339 case 2: gen_uxth(tmp); break;
11340 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11341 }
b0109805 11342 store_reg(s, rd, tmp);
9ee6e8bb 11343 break;
99c475ab
FB
11344 case 4: case 5: case 0xc: case 0xd:
11345 /* push/pop */
b0109805 11346 addr = load_reg(s, 13);
5899f386
FB
11347 if (insn & (1 << 8))
11348 offset = 4;
99c475ab 11349 else
5899f386
FB
11350 offset = 0;
11351 for (i = 0; i < 8; i++) {
11352 if (insn & (1 << i))
11353 offset += 4;
11354 }
11355 if ((insn & (1 << 11)) == 0) {
b0109805 11356 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11357 }
99c475ab
FB
11358 for (i = 0; i < 8; i++) {
11359 if (insn & (1 << i)) {
11360 if (insn & (1 << 11)) {
11361 /* pop */
c40c8556 11362 tmp = tcg_temp_new_i32();
12dcc321 11363 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11364 store_reg(s, i, tmp);
99c475ab
FB
11365 } else {
11366 /* push */
b0109805 11367 tmp = load_reg(s, i);
12dcc321 11368 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11369 tcg_temp_free_i32(tmp);
99c475ab 11370 }
5899f386 11371 /* advance to the next address. */
b0109805 11372 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11373 }
11374 }
39d5492a 11375 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11376 if (insn & (1 << 8)) {
11377 if (insn & (1 << 11)) {
11378 /* pop pc */
c40c8556 11379 tmp = tcg_temp_new_i32();
12dcc321 11380 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11381 /* don't set the pc until the rest of the instruction
11382 has completed */
11383 } else {
11384 /* push lr */
b0109805 11385 tmp = load_reg(s, 14);
12dcc321 11386 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11387 tcg_temp_free_i32(tmp);
99c475ab 11388 }
b0109805 11389 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11390 }
5899f386 11391 if ((insn & (1 << 11)) == 0) {
b0109805 11392 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11393 }
99c475ab 11394 /* write back the new stack pointer */
b0109805 11395 store_reg(s, 13, addr);
99c475ab 11396 /* set the new PC value */
be5e7a76 11397 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11398 store_reg_from_load(s, 15, tmp);
be5e7a76 11399 }
99c475ab
FB
11400 break;
11401
9ee6e8bb
PB
11402 case 1: case 3: case 9: case 11: /* czb */
11403 rm = insn & 7;
d9ba4830 11404 tmp = load_reg(s, rm);
9ee6e8bb
PB
11405 s->condlabel = gen_new_label();
11406 s->condjmp = 1;
11407 if (insn & (1 << 11))
cb63669a 11408 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11409 else
cb63669a 11410 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11411 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11412 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11413 val = (uint32_t)s->pc + 2;
11414 val += offset;
11415 gen_jmp(s, val);
11416 break;
11417
11418 case 15: /* IT, nop-hint. */
11419 if ((insn & 0xf) == 0) {
11420 gen_nop_hint(s, (insn >> 4) & 0xf);
11421 break;
11422 }
11423 /* If Then. */
11424 s->condexec_cond = (insn >> 4) & 0xe;
11425 s->condexec_mask = insn & 0x1f;
11426 /* No actual code generated for this insn, just setup state. */
11427 break;
11428
06c949e6 11429 case 0xe: /* bkpt */
d4a2dc67
PM
11430 {
11431 int imm8 = extract32(insn, 0, 8);
be5e7a76 11432 ARCH(5);
73710361
GB
11433 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11434 default_exception_el(s));
06c949e6 11435 break;
d4a2dc67 11436 }
06c949e6 11437
9ee6e8bb
PB
11438 case 0xa: /* rev */
11439 ARCH(6);
11440 rn = (insn >> 3) & 0x7;
11441 rd = insn & 0x7;
b0109805 11442 tmp = load_reg(s, rn);
9ee6e8bb 11443 switch ((insn >> 6) & 3) {
66896cb8 11444 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11445 case 1: gen_rev16(tmp); break;
11446 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
11447 default: goto illegal_op;
11448 }
b0109805 11449 store_reg(s, rd, tmp);
9ee6e8bb
PB
11450 break;
11451
d9e028c1
PM
11452 case 6:
11453 switch ((insn >> 5) & 7) {
11454 case 2:
11455 /* setend */
11456 ARCH(6);
9886ecdf
PB
11457 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11458 gen_helper_setend(cpu_env);
11459 s->is_jmp = DISAS_UPDATE;
d9e028c1 11460 }
9ee6e8bb 11461 break;
d9e028c1
PM
11462 case 3:
11463 /* cps */
11464 ARCH(6);
11465 if (IS_USER(s)) {
11466 break;
8984bd2e 11467 }
b53d8923 11468 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11469 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11470 /* FAULTMASK */
11471 if (insn & 1) {
11472 addr = tcg_const_i32(19);
11473 gen_helper_v7m_msr(cpu_env, addr, tmp);
11474 tcg_temp_free_i32(addr);
11475 }
11476 /* PRIMASK */
11477 if (insn & 2) {
11478 addr = tcg_const_i32(16);
11479 gen_helper_v7m_msr(cpu_env, addr, tmp);
11480 tcg_temp_free_i32(addr);
11481 }
11482 tcg_temp_free_i32(tmp);
11483 gen_lookup_tb(s);
11484 } else {
11485 if (insn & (1 << 4)) {
11486 shift = CPSR_A | CPSR_I | CPSR_F;
11487 } else {
11488 shift = 0;
11489 }
11490 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11491 }
d9e028c1
PM
11492 break;
11493 default:
11494 goto undef;
9ee6e8bb
PB
11495 }
11496 break;
11497
99c475ab
FB
11498 default:
11499 goto undef;
11500 }
11501 break;
11502
11503 case 12:
a7d3970d 11504 {
99c475ab 11505 /* load/store multiple */
39d5492a
PM
11506 TCGv_i32 loaded_var;
11507 TCGV_UNUSED_I32(loaded_var);
99c475ab 11508 rn = (insn >> 8) & 0x7;
b0109805 11509 addr = load_reg(s, rn);
99c475ab
FB
11510 for (i = 0; i < 8; i++) {
11511 if (insn & (1 << i)) {
99c475ab
FB
11512 if (insn & (1 << 11)) {
11513 /* load */
c40c8556 11514 tmp = tcg_temp_new_i32();
12dcc321 11515 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11516 if (i == rn) {
11517 loaded_var = tmp;
11518 } else {
11519 store_reg(s, i, tmp);
11520 }
99c475ab
FB
11521 } else {
11522 /* store */
b0109805 11523 tmp = load_reg(s, i);
12dcc321 11524 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11525 tcg_temp_free_i32(tmp);
99c475ab 11526 }
5899f386 11527 /* advance to the next address */
b0109805 11528 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11529 }
11530 }
b0109805 11531 if ((insn & (1 << rn)) == 0) {
a7d3970d 11532 /* base reg not in list: base register writeback */
b0109805
PB
11533 store_reg(s, rn, addr);
11534 } else {
a7d3970d
PM
11535 /* base reg in list: if load, complete it now */
11536 if (insn & (1 << 11)) {
11537 store_reg(s, rn, loaded_var);
11538 }
7d1b0095 11539 tcg_temp_free_i32(addr);
b0109805 11540 }
99c475ab 11541 break;
a7d3970d 11542 }
99c475ab
FB
11543 case 13:
11544 /* conditional branch or swi */
11545 cond = (insn >> 8) & 0xf;
11546 if (cond == 0xe)
11547 goto undef;
11548
11549 if (cond == 0xf) {
11550 /* swi */
eaed129d 11551 gen_set_pc_im(s, s->pc);
d4a2dc67 11552 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11553 s->is_jmp = DISAS_SWI;
99c475ab
FB
11554 break;
11555 }
11556 /* generate a conditional jump to next instruction */
e50e6a20 11557 s->condlabel = gen_new_label();
39fb730a 11558 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11559 s->condjmp = 1;
99c475ab
FB
11560
11561 /* jump to the offset */
5899f386 11562 val = (uint32_t)s->pc + 2;
99c475ab 11563 offset = ((int32_t)insn << 24) >> 24;
5899f386 11564 val += offset << 1;
8aaca4c0 11565 gen_jmp(s, val);
99c475ab
FB
11566 break;
11567
11568 case 14:
358bf29e 11569 if (insn & (1 << 11)) {
9ee6e8bb
PB
11570 if (disas_thumb2_insn(env, s, insn))
11571 goto undef32;
358bf29e
PB
11572 break;
11573 }
9ee6e8bb 11574 /* unconditional branch */
99c475ab
FB
11575 val = (uint32_t)s->pc;
11576 offset = ((int32_t)insn << 21) >> 21;
11577 val += (offset << 1) + 2;
8aaca4c0 11578 gen_jmp(s, val);
99c475ab
FB
11579 break;
11580
11581 case 15:
9ee6e8bb 11582 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11583 goto undef32;
9ee6e8bb 11584 break;
99c475ab
FB
11585 }
11586 return;
9ee6e8bb 11587undef32:
73710361
GB
11588 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11589 default_exception_el(s));
9ee6e8bb
PB
11590 return;
11591illegal_op:
99c475ab 11592undef:
73710361
GB
11593 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11594 default_exception_el(s));
99c475ab
FB
11595}
11596
541ebcd4
PM
11597static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11598{
11599 /* Return true if the insn at dc->pc might cross a page boundary.
11600 * (False positives are OK, false negatives are not.)
11601 */
11602 uint16_t insn;
11603
11604 if ((s->pc & 3) == 0) {
11605 /* At a 4-aligned address we can't be crossing a page */
11606 return false;
11607 }
11608
11609 /* This must be a Thumb insn */
f9fd40eb 11610 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11611
11612 if ((insn >> 11) >= 0x1d) {
11613 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11614 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11615 * end up actually treating this as two 16-bit insns (see the
11616 * code at the start of disas_thumb2_insn()) but we don't bother
11617 * to check for that as it is unlikely, and false positives here
11618 * are harmless.
11619 */
11620 return true;
11621 }
11622 /* Definitely a 16-bit insn, can't be crossing a page. */
11623 return false;
11624}
11625
20157705 11626/* generate intermediate code for basic block 'tb'. */
4e5e1215 11627void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11628{
4e5e1215 11629 ARMCPU *cpu = arm_env_get_cpu(env);
ed2803da 11630 CPUState *cs = CPU(cpu);
2c0262af 11631 DisasContext dc1, *dc = &dc1;
0fa85d43 11632 target_ulong pc_start;
0a2461fa 11633 target_ulong next_page_start;
2e70f6ef
PB
11634 int num_insns;
11635 int max_insns;
541ebcd4 11636 bool end_of_page;
3b46e624 11637
2c0262af 11638 /* generate intermediate code */
40f860cd
PM
11639
11640 /* The A64 decoder has its own top level loop, because it doesn't need
11641 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11642 */
11643 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
4e5e1215 11644 gen_intermediate_code_a64(cpu, tb);
40f860cd
PM
11645 return;
11646 }
11647
0fa85d43 11648 pc_start = tb->pc;
3b46e624 11649
2c0262af
FB
11650 dc->tb = tb;
11651
2c0262af
FB
11652 dc->is_jmp = DISAS_NEXT;
11653 dc->pc = pc_start;
ed2803da 11654 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 11655 dc->condjmp = 0;
3926cc84 11656
40f860cd 11657 dc->aarch64 = 0;
cef9ee70
SS
11658 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11659 * there is no secure EL1, so we route exceptions to EL3.
11660 */
11661 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11662 !arm_el_is_aa64(env, 3);
40f860cd 11663 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
f9fd40eb 11664 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
91cca2cd 11665 dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
40f860cd
PM
11666 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11667 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
c1e37810
PM
11668 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11669 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11670#if !defined(CONFIG_USER_ONLY)
c1e37810 11671 dc->user = (dc->current_el == 0);
3926cc84 11672#endif
3f342b9e 11673 dc->ns = ARM_TBFLAG_NS(tb->flags);
9dbbc748 11674 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
40f860cd
PM
11675 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11676 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11677 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
c0f4af17 11678 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
60322b39 11679 dc->cp_regs = cpu->cp_regs;
a984e42c 11680 dc->features = env->features;
40f860cd 11681
50225ad0
PM
11682 /* Single step state. The code-generation logic here is:
11683 * SS_ACTIVE == 0:
11684 * generate code with no special handling for single-stepping (except
11685 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11686 * this happens anyway because those changes are all system register or
11687 * PSTATE writes).
11688 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11689 * emit code for one insn
11690 * emit code to clear PSTATE.SS
11691 * emit code to generate software step exception for completed step
11692 * end TB (as usual for having generated an exception)
11693 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11694 * emit code to generate a software step exception
11695 * end the TB
11696 */
11697 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11698 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11699 dc->is_ldex = false;
11700 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11701
a7812ae4
PB
11702 cpu_F0s = tcg_temp_new_i32();
11703 cpu_F1s = tcg_temp_new_i32();
11704 cpu_F0d = tcg_temp_new_i64();
11705 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11706 cpu_V0 = cpu_F0d;
11707 cpu_V1 = cpu_F1d;
e677137d 11708 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11709 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 11710 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2e70f6ef
PB
11711 num_insns = 0;
11712 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 11713 if (max_insns == 0) {
2e70f6ef 11714 max_insns = CF_COUNT_MASK;
190ce7fb
RH
11715 }
11716 if (max_insns > TCG_MAX_INSNS) {
11717 max_insns = TCG_MAX_INSNS;
11718 }
2e70f6ef 11719
cd42d5b2 11720 gen_tb_start(tb);
e12ce78d 11721
3849902c
PM
11722 tcg_clear_temp_count();
11723
e12ce78d
PM
11724 /* A note on handling of the condexec (IT) bits:
11725 *
11726 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 11727 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 11728 * (1) if the condexec bits are not already zero then we write
0ecb72a5 11729 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
11730 * to do it at the end of the block. (For example if we don't do this
11731 * it's hard to identify whether we can safely skip writing condexec
11732 * at the end of the TB, which we definitely want to do for the case
11733 * where a TB doesn't do anything with the IT state at all.)
11734 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 11735 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
11736 * This is done both for leaving the TB at the end, and for leaving
11737 * it because of an exception we know will happen, which is done in
11738 * gen_exception_insn(). The latter is necessary because we need to
11739 * leave the TB with the PC/IT state just prior to execution of the
11740 * instruction which caused the exception.
11741 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 11742 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d 11743 * This is handled in the same way as restoration of the
4e5e1215
RH
11744 * PC in these situations; we save the value of the condexec bits
11745 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11746 * then uses this to restore them after an exception.
e12ce78d
PM
11747 *
11748 * Note that there are no instructions which can read the condexec
11749 * bits, and none which can write non-static values to them, so
0ecb72a5 11750 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
11751 * middle of a TB.
11752 */
11753
9ee6e8bb
PB
11754 /* Reset the conditional execution bits immediately. This avoids
11755 complications trying to do it at the end of the block. */
98eac7ca 11756 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 11757 {
39d5492a 11758 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 11759 tcg_gen_movi_i32(tmp, 0);
d9ba4830 11760 store_cpu_field(tmp, condexec_bits);
8f01245e 11761 }
2c0262af 11762 do {
52e971d9 11763 tcg_gen_insn_start(dc->pc,
aaa1f954
EI
11764 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11765 0);
b933066a
RH
11766 num_insns++;
11767
fbb4a2e3
PB
11768#ifdef CONFIG_USER_ONLY
11769 /* Intercept jump to the magic kernel page. */
40f860cd 11770 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
11771 /* We always get here via a jump, so know we are not in a
11772 conditional execution block. */
d4a2dc67 11773 gen_exception_internal(EXCP_KERNEL_TRAP);
577bf808 11774 dc->is_jmp = DISAS_EXC;
fbb4a2e3
PB
11775 break;
11776 }
11777#else
b53d8923 11778 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
9ee6e8bb
PB
11779 /* We always get here via a jump, so know we are not in a
11780 conditional execution block. */
d4a2dc67 11781 gen_exception_internal(EXCP_EXCEPTION_EXIT);
577bf808 11782 dc->is_jmp = DISAS_EXC;
d60bb01c 11783 break;
9ee6e8bb
PB
11784 }
11785#endif
11786
f0c3c505 11787 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
b933066a 11788 CPUBreakpoint *bp;
f0c3c505 11789 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11790 if (bp->pc == dc->pc) {
5d98bf8f 11791 if (bp->flags & BP_CPU) {
ce8a1b54 11792 gen_set_condexec(dc);
ed6c6448 11793 gen_set_pc_im(dc, dc->pc);
5d98bf8f
SF
11794 gen_helper_check_breakpoints(cpu_env);
11795 /* End the TB early; it's likely not going to be executed */
11796 dc->is_jmp = DISAS_UPDATE;
11797 } else {
11798 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
522a0d4e
RH
11799 /* The address covered by the breakpoint must be
11800 included in [tb->pc, tb->pc + tb->size) in order
11801 to for it to be properly cleared -- thus we
11802 increment the PC here so that the logic setting
11803 tb->size below does the right thing. */
5d98bf8f
SF
11804 /* TODO: Advance PC by correct instruction length to
11805 * avoid disassembler error messages */
11806 dc->pc += 2;
11807 goto done_generating;
11808 }
11809 break;
1fddef4b
FB
11810 }
11811 }
11812 }
e50e6a20 11813
959082fc 11814 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 11815 gen_io_start();
959082fc 11816 }
2e70f6ef 11817
50225ad0
PM
11818 if (dc->ss_active && !dc->pstate_ss) {
11819 /* Singlestep state is Active-pending.
11820 * If we're in this state at the start of a TB then either
11821 * a) we just took an exception to an EL which is being debugged
11822 * and this is the first insn in the exception handler
11823 * b) debug exceptions were masked and we just unmasked them
11824 * without changing EL (eg by clearing PSTATE.D)
11825 * In either case we're going to take a swstep exception in the
11826 * "did not step an insn" case, and so the syndrome ISV and EX
11827 * bits should be zero.
11828 */
959082fc 11829 assert(num_insns == 1);
73710361
GB
11830 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11831 default_exception_el(dc));
50225ad0
PM
11832 goto done_generating;
11833 }
11834
40f860cd 11835 if (dc->thumb) {
9ee6e8bb
PB
11836 disas_thumb_insn(env, dc);
11837 if (dc->condexec_mask) {
11838 dc->condexec_cond = (dc->condexec_cond & 0xe)
11839 | ((dc->condexec_mask >> 4) & 1);
11840 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11841 if (dc->condexec_mask == 0) {
11842 dc->condexec_cond = 0;
11843 }
11844 }
11845 } else {
f9fd40eb 11846 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
f4df2210
PM
11847 dc->pc += 4;
11848 disas_arm_insn(dc, insn);
9ee6e8bb 11849 }
e50e6a20
FB
11850
11851 if (dc->condjmp && !dc->is_jmp) {
11852 gen_set_label(dc->condlabel);
11853 dc->condjmp = 0;
11854 }
3849902c
PM
11855
11856 if (tcg_check_temp_count()) {
0a2461fa
AG
11857 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11858 dc->pc);
3849902c
PM
11859 }
11860
aaf2d97d 11861 /* Translation stops when a conditional branch is encountered.
e50e6a20 11862 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11863 * Also stop translation when a page boundary is reached. This
bf20dc07 11864 * ensures prefetch aborts occur at the right place. */
541ebcd4
PM
11865
11866 /* We want to stop the TB if the next insn starts in a new page,
11867 * or if it spans between this page and the next. This means that
11868 * if we're looking at the last halfword in the page we need to
11869 * see if it's a 16-bit Thumb insn (which will fit in this TB)
11870 * or a 32-bit Thumb insn (which won't).
11871 * This is to avoid generating a silly TB with a single 16-bit insn
11872 * in it at the end of this page (which would execute correctly
11873 * but isn't very efficient).
11874 */
11875 end_of_page = (dc->pc >= next_page_start) ||
11876 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
11877
fe700adb 11878 } while (!dc->is_jmp && !tcg_op_buf_full() &&
ed2803da 11879 !cs->singlestep_enabled &&
1b530a6d 11880 !singlestep &&
50225ad0 11881 !dc->ss_active &&
541ebcd4 11882 !end_of_page &&
2e70f6ef
PB
11883 num_insns < max_insns);
11884
11885 if (tb->cflags & CF_LAST_IO) {
11886 if (dc->condjmp) {
11887 /* FIXME: This can theoretically happen with self-modifying
11888 code. */
a47dddd7 11889 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11890 }
11891 gen_io_end();
11892 }
9ee6e8bb 11893
b5ff1b31 11894 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11895 instruction was a conditional branch or trap, and the PC has
11896 already been written. */
50225ad0 11897 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
7999a5c8 11898 /* Unconditional and "condition passed" instruction codepath. */
9ee6e8bb 11899 gen_set_condexec(dc);
7999a5c8
SF
11900 switch (dc->is_jmp) {
11901 case DISAS_SWI:
50225ad0 11902 gen_ss_advance(dc);
73710361
GB
11903 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11904 default_exception_el(dc));
7999a5c8
SF
11905 break;
11906 case DISAS_HVC:
37e6456e 11907 gen_ss_advance(dc);
73710361 11908 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
11909 break;
11910 case DISAS_SMC:
37e6456e 11911 gen_ss_advance(dc);
73710361 11912 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
11913 break;
11914 case DISAS_NEXT:
11915 case DISAS_UPDATE:
11916 gen_set_pc_im(dc, dc->pc);
11917 /* fall through */
11918 default:
11919 if (dc->ss_active) {
11920 gen_step_complete_exception(dc);
11921 } else {
11922 /* FIXME: Single stepping a WFI insn will not halt
11923 the CPU. */
11924 gen_exception_internal(EXCP_DEBUG);
11925 }
11926 }
11927 if (dc->condjmp) {
11928 /* "Condition failed" instruction codepath. */
11929 gen_set_label(dc->condlabel);
11930 gen_set_condexec(dc);
11931 gen_set_pc_im(dc, dc->pc);
11932 if (dc->ss_active) {
11933 gen_step_complete_exception(dc);
11934 } else {
11935 gen_exception_internal(EXCP_DEBUG);
11936 }
9ee6e8bb 11937 }
8aaca4c0 11938 } else {
9ee6e8bb
PB
11939 /* While branches must always occur at the end of an IT block,
11940 there are a few other things that can cause us to terminate
65626741 11941 the TB in the middle of an IT block:
9ee6e8bb
PB
11942 - Exception generating instructions (bkpt, swi, undefined).
11943 - Page boundaries.
11944 - Hardware watchpoints.
11945 Hardware breakpoints have already been handled and skip this code.
11946 */
11947 gen_set_condexec(dc);
8aaca4c0 11948 switch(dc->is_jmp) {
8aaca4c0 11949 case DISAS_NEXT:
6e256c93 11950 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 11951 break;
8aaca4c0 11952 case DISAS_UPDATE:
577bf808
SF
11953 gen_set_pc_im(dc, dc->pc);
11954 /* fall through */
11955 case DISAS_JUMP:
11956 default:
8aaca4c0 11957 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11958 tcg_gen_exit_tb(0);
8aaca4c0
FB
11959 break;
11960 case DISAS_TB_JUMP:
11961 /* nothing more to generate */
11962 break;
9ee6e8bb 11963 case DISAS_WFI:
1ce94f81 11964 gen_helper_wfi(cpu_env);
84549b6d
PM
11965 /* The helper doesn't necessarily throw an exception, but we
11966 * must go back to the main loop to check for interrupts anyway.
11967 */
11968 tcg_gen_exit_tb(0);
9ee6e8bb 11969 break;
72c1d3af
PM
11970 case DISAS_WFE:
11971 gen_helper_wfe(cpu_env);
11972 break;
c87e5a61
PM
11973 case DISAS_YIELD:
11974 gen_helper_yield(cpu_env);
11975 break;
9ee6e8bb 11976 case DISAS_SWI:
73710361
GB
11977 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
11978 default_exception_el(dc));
9ee6e8bb 11979 break;
37e6456e 11980 case DISAS_HVC:
73710361 11981 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
11982 break;
11983 case DISAS_SMC:
73710361 11984 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 11985 break;
8aaca4c0 11986 }
e50e6a20
FB
11987 if (dc->condjmp) {
11988 gen_set_label(dc->condlabel);
9ee6e8bb 11989 gen_set_condexec(dc);
6e256c93 11990 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11991 dc->condjmp = 0;
11992 }
2c0262af 11993 }
2e70f6ef 11994
9ee6e8bb 11995done_generating:
806f352d 11996 gen_tb_end(tb, num_insns);
2c0262af
FB
11997
11998#ifdef DEBUG_DISAS
06486077
AB
11999 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
12000 qemu_log_in_addr_range(pc_start)) {
93fcfe39
AL
12001 qemu_log("----------------\n");
12002 qemu_log("IN: %s\n", lookup_symbol(pc_start));
d49190c4 12003 log_target_disas(cs, pc_start, dc->pc - pc_start,
f9fd40eb 12004 dc->thumb | (dc->sctlr_b << 1));
93fcfe39 12005 qemu_log("\n");
2c0262af
FB
12006 }
12007#endif
4e5e1215
RH
12008 tb->size = dc->pc - pc_start;
12009 tb->icount = num_insns;
2c0262af
FB
12010}
12011
b5ff1b31 12012static const char *cpu_mode_names[16] = {
28c9457d
EI
12013 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
12014 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 12015};
9ee6e8bb 12016
878096ee
AF
12017void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12018 int flags)
2c0262af 12019{
878096ee
AF
12020 ARMCPU *cpu = ARM_CPU(cs);
12021 CPUARMState *env = &cpu->env;
2c0262af 12022 int i;
b5ff1b31 12023 uint32_t psr;
06e5cf7a 12024 const char *ns_status;
2c0262af 12025
17731115
PM
12026 if (is_a64(env)) {
12027 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12028 return;
12029 }
12030
2c0262af 12031 for(i=0;i<16;i++) {
7fe48483 12032 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12033 if ((i % 4) == 3)
7fe48483 12034 cpu_fprintf(f, "\n");
2c0262af 12035 else
7fe48483 12036 cpu_fprintf(f, " ");
2c0262af 12037 }
b5ff1b31 12038 psr = cpsr_read(env);
06e5cf7a
PM
12039
12040 if (arm_feature(env, ARM_FEATURE_EL3) &&
12041 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12042 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12043 } else {
12044 ns_status = "";
12045 }
12046
12047 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 12048 psr,
b5ff1b31
FB
12049 psr & (1 << 31) ? 'N' : '-',
12050 psr & (1 << 30) ? 'Z' : '-',
12051 psr & (1 << 29) ? 'C' : '-',
12052 psr & (1 << 28) ? 'V' : '-',
5fafdf24 12053 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 12054 ns_status,
b5ff1b31 12055 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 12056
f2617cfc
PM
12057 if (flags & CPU_DUMP_FPU) {
12058 int numvfpregs = 0;
12059 if (arm_feature(env, ARM_FEATURE_VFP)) {
12060 numvfpregs += 16;
12061 }
12062 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12063 numvfpregs += 16;
12064 }
12065 for (i = 0; i < numvfpregs; i++) {
12066 uint64_t v = float64_val(env->vfp.regs[i]);
12067 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12068 i * 2, (uint32_t)v,
12069 i * 2 + 1, (uint32_t)(v >> 32),
12070 i, v);
12071 }
12072 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12073 }
2c0262af 12074}
a6b025d3 12075
bad729e2
RH
12076void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12077 target_ulong *data)
d2856f1a 12078{
3926cc84 12079 if (is_a64(env)) {
bad729e2 12080 env->pc = data[0];
40f860cd 12081 env->condexec_bits = 0;
aaa1f954 12082 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12083 } else {
bad729e2
RH
12084 env->regs[15] = data[0];
12085 env->condexec_bits = data[1];
aaa1f954 12086 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12087 }
d2856f1a 12088}