]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
target/arm: Use pointers in neon zip/uzp helpers
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
2b51668f
PM
40#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 42/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 43#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
c99a55d3 44#define ENABLE_ARCH_5J arm_dc_feature(s, ARM_FEATURE_JAZELLE)
2b51668f
PM
45#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 50
86753403 51#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
b5ff1b31
FB
55#if defined(CONFIG_USER_ONLY)
56#define IS_USER(s) 1
57#else
58#define IS_USER(s) (s->user)
59#endif
60
ad69471c 61/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 62static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 63static TCGv_i32 cpu_R[16];
78bcaa3e
RH
64TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
65TCGv_i64 cpu_exclusive_addr;
66TCGv_i64 cpu_exclusive_val;
ad69471c 67
b26eefb6 68/* FIXME: These should be removed. */
39d5492a 69static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 70static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 71
022c62cb 72#include "exec/gen-icount.h"
2e70f6ef 73
155c3eac
FN
74static const char *regnames[] =
75 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
76 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
77
b26eefb6
PB
/* initialize TCG globals.
 * Creates the TCG global variables that mirror CPUARMState fields used
 * by the translator: the 16 core registers, the NZCV flag words, and
 * the exclusive-monitor state; then performs the equivalent AArch64
 * initialization.  Called once at translator startup.
 */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}
100
9bb6558a
PM
101/* Flags for the disas_set_da_iss info argument:
102 * lower bits hold the Rt register number, higher bits are flags.
103 */
104typedef enum ISSInfo {
105 ISSNone = 0,
106 ISSRegMask = 0x1f,
107 ISSInvalid = (1 << 5),
108 ISSIsAcqRel = (1 << 6),
109 ISSIsWrite = (1 << 7),
110 ISSIs16Bit = (1 << 8),
111} ISSInfo;
112
/* Save the syndrome information for a Data Abort.
 * @memop carries the access size (MO_SIZE) and signedness (MO_SIGN);
 * @issinfo carries the Rt register number in its low bits plus the
 * ISS* flags defined above.  No syndrome is recorded when the caller
 * marked the info invalid or when Rt is R15.
 */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
143
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     * Each case maps the current translation-regime mmu_idx to the
     * corresponding unprivileged index for the same security state.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    case ARMMMUIdx_S2NS:
    default:
        /* Stage-2 (and anything unlisted) has no unprivileged form */
        g_assert_not_reached();
    }
}
177
/* Load a 32-bit CPUARMState field at @offset into a new temporary.
 * Caller owns (and must free) the returned temporary.
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}
184
0ecb72a5 185#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 186
/* Store @var into the 32-bit CPUARMState field at @offset.
 * @var is consumed (freed) by this call.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}
192
193#define store_cpu_field(var, name) \
0ecb72a5 194 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 195
/* Set a variable to the value of a CPU register.
 * Reading R15 yields the architectural PC value: the address of the
 * current insn plus 4 (Thumb: plus 2 beyond the updated s->pc).
 */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
211
/* Create a new temporary and set it to the value of a CPU register.
 * Caller owns (and must free) the returned temporary.
 */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
219
/* Set a CPU register. The source must be a temporary and will be
   marked as dead. Writes to R15 mask the low bit(s) and flag the
   translation block as ending in a jump. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
236
b26eefb6 237/* Value extensions. */
86831435
PB
238#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
239#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
240#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
241#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
242
1497c961
PB
243#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
244#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 245
b26eefb6 246
/* Write @var to the CPSR fields selected by @mask via the helper
 * (which handles mode switches and flag extraction).
 */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
d9ba4830
PB
253/* Set NZCV flags from the high 4 bits of var. */
254#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
255
/* Raise a QEMU-internal exception (one of the EXCP_* values that do
 * not correspond to an architectural exception).
 */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
264
/* Raise an architectural exception @excp with the given @syndrome,
 * to be taken to exception level @target_el.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
278
50225ad0
PM
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
289
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->base.is_jmp = DISAS_NORETURN;
}
306
5425415e
PM
static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}
319
b636649f
PM
static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}
330
/* Dual 16x16->32 signed multiply: a = low(a)*low(b), b = high(a)*high(b).
 * Inputs are modified in place.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
345
/* Byteswap each halfword.  Swaps the bytes within each 16-bit half of
 * var independently (REV16 semantics) using a mask-and-shift sequence.
 */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}
359
/* Byteswap low halfword and sign extend (REVSH semantics). */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
367
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;   /* result reuses the 'a' temporary */
}
381
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;   /* result reuses the 'a' temporary */
}
395
/* 32x32->64 multiply. Marks inputs as dead.
 * Unsigned variant; returns a fresh 64-bit temporary holding a * b.
 */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
414
/* 32x32->64 signed multiply. Marks inputs as dead; returns a fresh
 * 64-bit temporary holding a * b.
 */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
432
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
442
b26eefb6
PB
/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
   (Clearing bit 15 of both halves before the add stops any carry
   crossing the halfword boundary; the xor restores the true sum bit.)
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
462
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
468
/* Set N and Z flags from var.  (NF holds the value itself; ZF is
   tested as "zero iff value is zero".) */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
475
/* T0 += T1 + CF.  Flags are not updated. */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
482
/* dest = T0 + T1 + CF.  Flags are not updated. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
489
/* dest = T0 - T1 + CF - 1.  (Subtract with borrow; flags untouched.) */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
497
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    /* add2 produces the 32-bit sum in NF and the carry-out in CF */
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had the
     * same sign and the result's sign differs. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
511
/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first fold in the carry, then t1 */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the addition in 64 bits and split sum/carry */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
539
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM C flag for subtraction is NOT-borrow: set when t0 >= t1 */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
554
/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags
 * (subtract-with-carry expressed via gen_adc_CC on the complement). */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
563
/* Variable shift helpers: shift amount is taken from the low byte of
 * t1; amounts of 32..255 yield 0 (ARM semantics), implemented with a
 * movcond selecting a zero source before the masked shift.
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
582
/* Arithmetic shift right by variable amount: amounts >= 32 are clamped
 * to 31, which gives the ARM-architected "fill with sign bit" result.
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
594
/* dest = |src| via movcond: select src when src > 0, else -src. */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 604
/* Set CF to bit @shift of var (the bit shifted out by an immediate
 * shifter operand).  shift == 0 means bit 0.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 616
9a119ff6 617/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
618static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
619 int shift, int flags)
9a119ff6
PB
620{
621 switch (shiftop) {
622 case 0: /* LSL */
623 if (shift != 0) {
624 if (flags)
625 shifter_out_im(var, 32 - shift);
626 tcg_gen_shli_i32(var, var, shift);
627 }
628 break;
629 case 1: /* LSR */
630 if (shift == 0) {
631 if (flags) {
66c374de 632 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
633 }
634 tcg_gen_movi_i32(var, 0);
635 } else {
636 if (flags)
637 shifter_out_im(var, shift - 1);
638 tcg_gen_shri_i32(var, var, shift);
639 }
640 break;
641 case 2: /* ASR */
642 if (shift == 0)
643 shift = 32;
644 if (flags)
645 shifter_out_im(var, shift - 1);
646 if (shift == 32)
647 shift = 31;
648 tcg_gen_sari_i32(var, var, shift);
649 break;
650 case 3: /* ROR/RRX */
651 if (shift != 0) {
652 if (flags)
653 shifter_out_im(var, shift - 1);
f669df27 654 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 655 } else {
39d5492a 656 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 657 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
658 if (flags)
659 shifter_out_im(var, 0);
660 tcg_gen_shri_i32(var, var, 1);
b26eefb6 661 tcg_gen_or_i32(var, var, tmp);
7d1b0095 662 tcg_temp_free_i32(tmp);
b26eefb6
PB
663 }
664 }
665};
666
39d5492a
PM
/* Shift by register.  When @flags is set, use the cc helpers so the
 * carry flag is updated; otherwise use the plain shift generators.
 * @shift is consumed (freed) by this call.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
694
6ddbc6e4
PB
/* Dispatch the ARM-encoding parallel add/subtract ops (SADD16 etc.):
 * op1 selects the saturation/halving/GE-setting family, op2 selects the
 * operation within it.  The s/u families take a pointer to the GE flags.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
740
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
/* Same dispatch as gen_arm_parallel_addsub but with the Thumb-2 encoding:
 * here op1 selects the operation and op2 the family.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
787
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * On return, cmp->cond/cmp->value describe a comparison-against-zero
 * that is true when the condition holds; cmp->value_global records
 * whether value is a TCG global (not to be freed) or a temporary.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions. */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0. */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result. */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value. */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
877
/* Release the temporary created by arm_test_cc(), if any. */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}
884
/* Branch to @label when the prepared condition holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
889
/* Convenience wrapper: branch to @label if condition code @cc holds. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 897
/* Indexed by data-processing opcode: 1 when the op sets flags via
 * gen_logic_CC (logical ops), 0 when it computes them arithmetically.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 916
4d5e8c96
PM
/* Write the current IT-block (conditional execution) state back into
 * CPUARMState.condexec_bits, if we are inside an IT block.
 */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
926
/* Set the PC register to an immediate value. */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
931
d9ba4830
PB
/* Set PC and Thumb state from an immediate address.
 * Bit 0 of @addr selects the Thumb state (interworking branch); the
 * CPUARMState.thumb field is only written when the state changes.
 */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->base.is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
946
/* Set PC and Thumb state from var. var is marked as dead.
 * Bit 0 of var becomes the new Thumb state; the remaining bits the PC. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->base.is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
955
3bb8a96f
PM
/* Set PC and Thumb state from var. var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->base.is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
        (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
        s->base.is_jmp = DISAS_BX_EXCRET;
    }
}
972
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
1010
fb602cb7
PM
/* Generate a v8M BXNS (branch to non-secure) via the helper. */
static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
1029
3e3fa230
PM
/* Generate a v8M BLXNS (branch with link to non-secure) via the helper. */
static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->pc);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}
1043
21aeb343
JR
1044/* Variant of store_reg which uses branch&exchange logic when storing
1045 to r15 in ARM architecture v7 and above. The source must be a temporary
1046 and will be marked as dead. */
7dcc1f89 1047static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
1048{
1049 if (reg == 15 && ENABLE_ARCH_7) {
1050 gen_bx(s, var);
1051 } else {
1052 store_reg(s, reg, var);
1053 }
1054}
1055
be5e7a76
DES
1056/* Variant of store_reg which uses branch&exchange logic when storing
1057 * to r15 in ARM architecture v5T and above. This is used for storing
1058 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1059 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1060static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1061{
1062 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1063 gen_bx_excret(s, var);
be5e7a76
DES
1064 } else {
1065 store_reg(s, reg, var);
1066 }
1067}
1068
e334bd31
PB
/* Compile-time flag: 1 when building the user-mode-only emulator,
 * 0 for softmmu/system emulation. Lets endianness fixups below be
 * written as ordinary C conditions the compiler can fold away.
 */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif
1074
08307563
PM
/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        /* BE32 system mode: sub-word accesses are modelled by xoring
         * the low address bits, which swaps bytes within each word.
         */
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
1094
7f5616f5
RH
/* Emit a 32-bit (or narrower, per opc) guest load from a 32-bit vaddr. */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
1102
7f5616f5
RH
/* Emit a 32-bit (or narrower, per opc) guest store to a 32-bit vaddr. */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
08307563 1110
/* Expand to a pair of load emitters for one access size:
 * gen_aa32_ldSUFF() does the plain load (endianness from s->be_data),
 * gen_aa32_ldSUFF_iss() additionally records ISS syndrome info for
 * a possible syndrome-valid data abort.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}
1125
/* Store-side counterpart of DO_GEN_LD; the _iss variant marks the
 * syndrome as a write via ISSIsWrite.
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1140
/* Fix up a 64-bit value loaded in BE32 system mode: the two words
 * arrive swapped, so rotate by 32 to put them in the right order.
 */
static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}
1148
7f5616f5
RH
/* Emit a 64-bit guest load from a 32-bit vaddr, with BE32 word fixup. */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}
1157
/* Convenience wrapper: 64-bit load using the TB's data endianness. */
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
1163
/* Emit a 64-bit guest store to a 32-bit vaddr.  In BE32 system mode
 * the two words must be swapped before the store; do this in a
 * temporary so the caller's value is not clobbered.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}
1180
7f5616f5
RH
/* Convenience wrapper: 64-bit store using the TB's data endianness. */
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1186
7f5616f5
RH
/* Instantiate the sized load/store emitters used by the translator. */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
08307563 1195
37e6456e
PM
/* Emit a HVC (hypervisor call) with the given 16-bit immediate. */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_HVC;
}
1213
/* Emit an SMC (secure monitor call). */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->base.is_jmp = DISAS_SMC;
}
1228
d4a2dc67
PM
/* Raise an internal-to-QEMU exception for the insn at (s->pc - offset),
 * after syncing condexec and PC so guest state is consistent.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}
1236
73710361
GB
/* Raise an architectural exception (with syndrome and target EL) for
 * the insn at (s->pc - offset), after syncing condexec and PC.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}
1245
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 of the PC is always stored clear (alignment). */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->base.is_jmp = DISAS_EXIT;
}
1252
19a6e31c
PM
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    /* Not a semihosting trap: UNDEF, insn length depends on ISA. */
    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1279
b0109805 1280static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1281 TCGv_i32 var)
2c0262af 1282{
1e8d4eec 1283 int val, rm, shift, shiftop;
39d5492a 1284 TCGv_i32 offset;
2c0262af
FB
1285
1286 if (!(insn & (1 << 25))) {
1287 /* immediate */
1288 val = insn & 0xfff;
1289 if (!(insn & (1 << 23)))
1290 val = -val;
537730b9 1291 if (val != 0)
b0109805 1292 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1293 } else {
1294 /* shift/register */
1295 rm = (insn) & 0xf;
1296 shift = (insn >> 7) & 0x1f;
1e8d4eec 1297 shiftop = (insn >> 5) & 3;
b26eefb6 1298 offset = load_reg(s, rm);
9a119ff6 1299 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1300 if (!(insn & (1 << 23)))
b0109805 1301 tcg_gen_sub_i32(var, var, offset);
2c0262af 1302 else
b0109805 1303 tcg_gen_add_i32(var, var, offset);
7d1b0095 1304 tcg_temp_free_i32(offset);
2c0262af
FB
1305 }
1306}
1307
191f9a93 1308static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1309 int extra, TCGv_i32 var)
2c0262af
FB
1310{
1311 int val, rm;
39d5492a 1312 TCGv_i32 offset;
3b46e624 1313
2c0262af
FB
1314 if (insn & (1 << 22)) {
1315 /* immediate */
1316 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1317 if (!(insn & (1 << 23)))
1318 val = -val;
18acad92 1319 val += extra;
537730b9 1320 if (val != 0)
b0109805 1321 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1322 } else {
1323 /* register */
191f9a93 1324 if (extra)
b0109805 1325 tcg_gen_addi_i32(var, var, extra);
2c0262af 1326 rm = (insn) & 0xf;
b26eefb6 1327 offset = load_reg(s, rm);
2c0262af 1328 if (!(insn & (1 << 23)))
b0109805 1329 tcg_gen_sub_i32(var, var, offset);
2c0262af 1330 else
b0109805 1331 tcg_gen_add_i32(var, var, offset);
7d1b0095 1332 tcg_temp_free_i32(offset);
2c0262af
FB
1333 }
1334}
1335
5aaebd13
PM
1336static TCGv_ptr get_fpstatus_ptr(int neon)
1337{
1338 TCGv_ptr statusptr = tcg_temp_new_ptr();
1339 int offset;
1340 if (neon) {
0ecb72a5 1341 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1342 } else {
0ecb72a5 1343 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1344 }
1345 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1346 return statusptr;
1347}
1348
4373f3ce
PB
/* Two-operand VFP arithmetic on the F0/F1 scratch registers:
 * F0 = F0 op F1, single or double precision, using the normal
 * (non-Neon) float status.
 */
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1367
605a6aed
PM
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1379
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1389
4373f3ce
PB
1390static inline void gen_vfp_abs(int dp)
1391{
1392 if (dp)
1393 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1394 else
1395 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1396}
1397
1398static inline void gen_vfp_neg(int dp)
1399{
1400 if (dp)
1401 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1402 else
1403 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1404}
1405
1406static inline void gen_vfp_sqrt(int dp)
1407{
1408 if (dp)
1409 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1410 else
1411 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1412}
1413
1414static inline void gen_vfp_cmp(int dp)
1415{
1416 if (dp)
1417 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1418 else
1419 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1420}
1421
1422static inline void gen_vfp_cmpe(int dp)
1423{
1424 if (dp)
1425 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1426 else
1427 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1428}
1429
1430static inline void gen_vfp_F1_ld0(int dp)
1431{
1432 if (dp)
5b340b51 1433 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1434 else
5b340b51 1435 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1436}
1437
5500b06c
PM
/* Integer-to-float conversions on F0. The 32-bit integer source is
 * always in F0s; the result goes to F0d or F0s depending on dp.
 * neon selects the Neon standard float status.
 */
#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1453
5500b06c
PM
/* Float-to-integer conversions on F0. The source is F0d or F0s per
 * dp; the 32-bit integer result always lands in F0s. The "z" forms
 * round toward zero.
 */
#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce 1471
/* Fixed-point <-> float conversions on F0 with an immediate fraction
 * shift. "round" selects the helper variant (float-to-fixed uses
 * round-to-zero; fixed-to-float uses the current rounding mode).
 */
#define VFP_GEN_FIX(name, round)                                      \
static inline void gen_vfp_##name(int dp, int shift, int neon)        \
{                                                                     \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                        \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift,  \
                                        statusptr);                   \
    } else {                                                          \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift,  \
                                        statusptr);                   \
    }                                                                 \
    tcg_temp_free_i32(tmp_shift);                                     \
    tcg_temp_free_ptr(statusptr);                                     \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1496
39d5492a 1497static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1498{
08307563 1499 if (dp) {
12dcc321 1500 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1501 } else {
12dcc321 1502 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1503 }
b5ff1b31
FB
1504}
1505
39d5492a 1506static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1507{
08307563 1508 if (dp) {
12dcc321 1509 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1510 } else {
12dcc321 1511 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1512 }
b5ff1b31
FB
1513}
1514
8e96005d
FB
1515static inline long
1516vfp_reg_offset (int dp, int reg)
1517{
1518 if (dp)
1519 return offsetof(CPUARMState, vfp.regs[reg]);
1520 else if (reg & 1) {
1521 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1522 + offsetof(CPU_DoubleU, l.upper);
1523 } else {
1524 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1525 + offsetof(CPU_DoubleU, l.lower);
1526 }
1527}
9ee6e8bb
PB
1528
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset(int reg, int n)
{
    /* Each D register holds two 32-bit pieces; map to an S index. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1538
/* Load one 32-bit pass of a Neon register into a fresh temp
 * (caller frees it).
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1545
/* Store var into one 32-bit pass of a Neon register; var is consumed. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1551
/* Load a whole 64-bit Neon D register into var. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1556
/* Store var into a whole 64-bit Neon D register. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1561
1a66ac61
RH
/* Return a fresh TCG pointer to VFP register 'reg' in CPUARMState
 * (caller frees it). Used to pass register addresses to helpers.
 */
static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}
1568
4373f3ce
PB
/* VFP values live in plain i32/i64 TCG temps; alias the "float"
 * load/store names to the integer ops of matching width.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
b7bcbe95
FB
1574static inline void gen_mov_F0_vreg(int dp, int reg)
1575{
1576 if (dp)
4373f3ce 1577 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1578 else
4373f3ce 1579 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1580}
1581
1582static inline void gen_mov_F1_vreg(int dp, int reg)
1583{
1584 if (dp)
4373f3ce 1585 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1586 else
4373f3ce 1587 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1588}
1589
1590static inline void gen_mov_vreg_F0(int dp, int reg)
1591{
1592 if (dp)
4373f3ce 1593 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1594 else
4373f3ce 1595 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1596}
1597
18c9b560
AZ
/* Coprocessor insn bit 20: set for the load/read (MRC/WLDR) forms. */
#define ARM_CP_RW_BIT (1 << 20)
1599
/* Load iwMMXt data register wRn into var. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1604
/* Store var into iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1609
/* Load iwMMXt control register wCx into a fresh temp (caller frees). */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1616
/* Store var into iwMMXt control register wCx; var is consumed. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1622
/* Moves between the M0 scratch and wRn, and 64-bit logic ops that
 * combine M0 with wRn in place.
 */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1650
/* M0 = helper(M0, wRn) for a pure helper with no env access. */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (reads/updates
 * iwMMXt control state such as flags or saturation).
 */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env helper. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env helper: M0 = helper(M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1730
e677137d
PB
/* Set the MUP (data-register updated) bit in wCon. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1738
/* Set the CUP (control-register updated) bit in wCon. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1746
/* Update the N and Z flags in wCASF from the value in M0. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1753
/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1760
39d5492a
PM
1761static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1762 TCGv_i32 dest)
18c9b560
AZ
1763{
1764 int rd;
1765 uint32_t offset;
39d5492a 1766 TCGv_i32 tmp;
18c9b560
AZ
1767
1768 rd = (insn >> 16) & 0xf;
da6b5335 1769 tmp = load_reg(s, rd);
18c9b560
AZ
1770
1771 offset = (insn & 0xff) << ((insn >> 7) & 2);
1772 if (insn & (1 << 24)) {
1773 /* Pre indexed */
1774 if (insn & (1 << 23))
da6b5335 1775 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1776 else
da6b5335
FN
1777 tcg_gen_addi_i32(tmp, tmp, -offset);
1778 tcg_gen_mov_i32(dest, tmp);
18c9b560 1779 if (insn & (1 << 21))
da6b5335
FN
1780 store_reg(s, rd, tmp);
1781 else
7d1b0095 1782 tcg_temp_free_i32(tmp);
18c9b560
AZ
1783 } else if (insn & (1 << 21)) {
1784 /* Post indexed */
da6b5335 1785 tcg_gen_mov_i32(dest, tmp);
18c9b560 1786 if (insn & (1 << 23))
da6b5335 1787 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1788 else
da6b5335
FN
1789 tcg_gen_addi_i32(tmp, tmp, -offset);
1790 store_reg(s, rd, tmp);
18c9b560
AZ
1791 } else if (!(insn & (1 << 23)))
1792 return 1;
1793 return 0;
1794}
1795
/* Fetch an iwMMXt shift amount into dest, masked by 'mask'. With bit 8
 * set the amount comes from a wCGRn control register (others are
 * invalid: return 1); otherwise from the low half of data register wRn.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1817
a1c7273b 1818/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1819 (ie. an undefined instruction). */
7dcc1f89 1820static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1821{
1822 int rd, wrd;
1823 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1824 TCGv_i32 addr;
1825 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1826
1827 if ((insn & 0x0e000e00) == 0x0c000000) {
1828 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1829 wrd = insn & 0xf;
1830 rdlo = (insn >> 12) & 0xf;
1831 rdhi = (insn >> 16) & 0xf;
1832 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1833 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1834 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1835 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1836 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1837 } else { /* TMCRR */
da6b5335
FN
1838 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1839 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1840 gen_op_iwmmxt_set_mup();
1841 }
1842 return 0;
1843 }
1844
1845 wrd = (insn >> 12) & 0xf;
7d1b0095 1846 addr = tcg_temp_new_i32();
da6b5335 1847 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1848 tcg_temp_free_i32(addr);
18c9b560 1849 return 1;
da6b5335 1850 }
18c9b560
AZ
1851 if (insn & ARM_CP_RW_BIT) {
1852 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1853 tmp = tcg_temp_new_i32();
12dcc321 1854 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1855 iwmmxt_store_creg(wrd, tmp);
18c9b560 1856 } else {
e677137d
PB
1857 i = 1;
1858 if (insn & (1 << 8)) {
1859 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1860 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1861 i = 0;
1862 } else { /* WLDRW wRd */
29531141 1863 tmp = tcg_temp_new_i32();
12dcc321 1864 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1865 }
1866 } else {
29531141 1867 tmp = tcg_temp_new_i32();
e677137d 1868 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1869 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1870 } else { /* WLDRB */
12dcc321 1871 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1872 }
1873 }
1874 if (i) {
1875 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1876 tcg_temp_free_i32(tmp);
e677137d 1877 }
18c9b560
AZ
1878 gen_op_iwmmxt_movq_wRn_M0(wrd);
1879 }
1880 } else {
1881 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1882 tmp = iwmmxt_load_creg(wrd);
12dcc321 1883 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1884 } else {
1885 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1886 tmp = tcg_temp_new_i32();
e677137d
PB
1887 if (insn & (1 << 8)) {
1888 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1889 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1890 } else { /* WSTRW wRd */
ecc7b3aa 1891 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1892 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1893 }
1894 } else {
1895 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1896 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1897 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1898 } else { /* WSTRB */
ecc7b3aa 1899 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1900 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1901 }
1902 }
18c9b560 1903 }
29531141 1904 tcg_temp_free_i32(tmp);
18c9b560 1905 }
7d1b0095 1906 tcg_temp_free_i32(addr);
18c9b560
AZ
1907 return 0;
1908 }
1909
1910 if ((insn & 0x0f000000) != 0x0e000000)
1911 return 1;
1912
1913 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1914 case 0x000: /* WOR */
1915 wrd = (insn >> 12) & 0xf;
1916 rd0 = (insn >> 0) & 0xf;
1917 rd1 = (insn >> 16) & 0xf;
1918 gen_op_iwmmxt_movq_M0_wRn(rd0);
1919 gen_op_iwmmxt_orq_M0_wRn(rd1);
1920 gen_op_iwmmxt_setpsr_nz();
1921 gen_op_iwmmxt_movq_wRn_M0(wrd);
1922 gen_op_iwmmxt_set_mup();
1923 gen_op_iwmmxt_set_cup();
1924 break;
1925 case 0x011: /* TMCR */
1926 if (insn & 0xf)
1927 return 1;
1928 rd = (insn >> 12) & 0xf;
1929 wrd = (insn >> 16) & 0xf;
1930 switch (wrd) {
1931 case ARM_IWMMXT_wCID:
1932 case ARM_IWMMXT_wCASF:
1933 break;
1934 case ARM_IWMMXT_wCon:
1935 gen_op_iwmmxt_set_cup();
1936 /* Fall through. */
1937 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1938 tmp = iwmmxt_load_creg(wrd);
1939 tmp2 = load_reg(s, rd);
f669df27 1940 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1941 tcg_temp_free_i32(tmp2);
da6b5335 1942 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1943 break;
1944 case ARM_IWMMXT_wCGR0:
1945 case ARM_IWMMXT_wCGR1:
1946 case ARM_IWMMXT_wCGR2:
1947 case ARM_IWMMXT_wCGR3:
1948 gen_op_iwmmxt_set_cup();
da6b5335
FN
1949 tmp = load_reg(s, rd);
1950 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1951 break;
1952 default:
1953 return 1;
1954 }
1955 break;
1956 case 0x100: /* WXOR */
1957 wrd = (insn >> 12) & 0xf;
1958 rd0 = (insn >> 0) & 0xf;
1959 rd1 = (insn >> 16) & 0xf;
1960 gen_op_iwmmxt_movq_M0_wRn(rd0);
1961 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1962 gen_op_iwmmxt_setpsr_nz();
1963 gen_op_iwmmxt_movq_wRn_M0(wrd);
1964 gen_op_iwmmxt_set_mup();
1965 gen_op_iwmmxt_set_cup();
1966 break;
1967 case 0x111: /* TMRC */
1968 if (insn & 0xf)
1969 return 1;
1970 rd = (insn >> 12) & 0xf;
1971 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1972 tmp = iwmmxt_load_creg(wrd);
1973 store_reg(s, rd, tmp);
18c9b560
AZ
1974 break;
1975 case 0x300: /* WANDN */
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 0) & 0xf;
1978 rd1 = (insn >> 16) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1980 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1981 gen_op_iwmmxt_andq_M0_wRn(rd1);
1982 gen_op_iwmmxt_setpsr_nz();
1983 gen_op_iwmmxt_movq_wRn_M0(wrd);
1984 gen_op_iwmmxt_set_mup();
1985 gen_op_iwmmxt_set_cup();
1986 break;
1987 case 0x200: /* WAND */
1988 wrd = (insn >> 12) & 0xf;
1989 rd0 = (insn >> 0) & 0xf;
1990 rd1 = (insn >> 16) & 0xf;
1991 gen_op_iwmmxt_movq_M0_wRn(rd0);
1992 gen_op_iwmmxt_andq_M0_wRn(rd1);
1993 gen_op_iwmmxt_setpsr_nz();
1994 gen_op_iwmmxt_movq_wRn_M0(wrd);
1995 gen_op_iwmmxt_set_mup();
1996 gen_op_iwmmxt_set_cup();
1997 break;
1998 case 0x810: case 0xa10: /* WMADD */
1999 wrd = (insn >> 12) & 0xf;
2000 rd0 = (insn >> 0) & 0xf;
2001 rd1 = (insn >> 16) & 0xf;
2002 gen_op_iwmmxt_movq_M0_wRn(rd0);
2003 if (insn & (1 << 21))
2004 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
2005 else
2006 gen_op_iwmmxt_madduq_M0_wRn(rd1);
2007 gen_op_iwmmxt_movq_wRn_M0(wrd);
2008 gen_op_iwmmxt_set_mup();
2009 break;
2010 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
2011 wrd = (insn >> 12) & 0xf;
2012 rd0 = (insn >> 16) & 0xf;
2013 rd1 = (insn >> 0) & 0xf;
2014 gen_op_iwmmxt_movq_M0_wRn(rd0);
2015 switch ((insn >> 22) & 3) {
2016 case 0:
2017 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
2018 break;
2019 case 1:
2020 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
2021 break;
2022 case 2:
2023 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
2024 break;
2025 case 3:
2026 return 1;
2027 }
2028 gen_op_iwmmxt_movq_wRn_M0(wrd);
2029 gen_op_iwmmxt_set_mup();
2030 gen_op_iwmmxt_set_cup();
2031 break;
2032 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
2033 wrd = (insn >> 12) & 0xf;
2034 rd0 = (insn >> 16) & 0xf;
2035 rd1 = (insn >> 0) & 0xf;
2036 gen_op_iwmmxt_movq_M0_wRn(rd0);
2037 switch ((insn >> 22) & 3) {
2038 case 0:
2039 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
2040 break;
2041 case 1:
2042 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
2043 break;
2044 case 2:
2045 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
2046 break;
2047 case 3:
2048 return 1;
2049 }
2050 gen_op_iwmmxt_movq_wRn_M0(wrd);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2053 break;
2054 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
2055 wrd = (insn >> 12) & 0xf;
2056 rd0 = (insn >> 16) & 0xf;
2057 rd1 = (insn >> 0) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 if (insn & (1 << 22))
2060 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2061 else
2062 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2063 if (!(insn & (1 << 20)))
2064 gen_op_iwmmxt_addl_M0_wRn(wrd);
2065 gen_op_iwmmxt_movq_wRn_M0(wrd);
2066 gen_op_iwmmxt_set_mup();
2067 break;
2068 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2069 wrd = (insn >> 12) & 0xf;
2070 rd0 = (insn >> 16) & 0xf;
2071 rd1 = (insn >> 0) & 0xf;
2072 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2073 if (insn & (1 << 21)) {
2074 if (insn & (1 << 20))
2075 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2076 else
2077 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2078 } else {
2079 if (insn & (1 << 20))
2080 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2081 else
2082 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2083 }
18c9b560
AZ
2084 gen_op_iwmmxt_movq_wRn_M0(wrd);
2085 gen_op_iwmmxt_set_mup();
2086 break;
2087 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 if (insn & (1 << 21))
2093 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2094 else
2095 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2096 if (!(insn & (1 << 20))) {
e677137d
PB
2097 iwmmxt_load_reg(cpu_V1, wrd);
2098 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2099 }
2100 gen_op_iwmmxt_movq_wRn_M0(wrd);
2101 gen_op_iwmmxt_set_mup();
2102 break;
2103 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2104 wrd = (insn >> 12) & 0xf;
2105 rd0 = (insn >> 16) & 0xf;
2106 rd1 = (insn >> 0) & 0xf;
2107 gen_op_iwmmxt_movq_M0_wRn(rd0);
2108 switch ((insn >> 22) & 3) {
2109 case 0:
2110 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2111 break;
2112 case 1:
2113 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2114 break;
2115 case 2:
2116 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2117 break;
2118 case 3:
2119 return 1;
2120 }
2121 gen_op_iwmmxt_movq_wRn_M0(wrd);
2122 gen_op_iwmmxt_set_mup();
2123 gen_op_iwmmxt_set_cup();
2124 break;
2125 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2126 wrd = (insn >> 12) & 0xf;
2127 rd0 = (insn >> 16) & 0xf;
2128 rd1 = (insn >> 0) & 0xf;
2129 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2130 if (insn & (1 << 22)) {
2131 if (insn & (1 << 20))
2132 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2135 } else {
2136 if (insn & (1 << 20))
2137 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2138 else
2139 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2140 }
18c9b560
AZ
2141 gen_op_iwmmxt_movq_wRn_M0(wrd);
2142 gen_op_iwmmxt_set_mup();
2143 gen_op_iwmmxt_set_cup();
2144 break;
2145 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2146 wrd = (insn >> 12) & 0xf;
2147 rd0 = (insn >> 16) & 0xf;
2148 rd1 = (insn >> 0) & 0xf;
2149 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2150 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2151 tcg_gen_andi_i32(tmp, tmp, 7);
2152 iwmmxt_load_reg(cpu_V1, rd1);
2153 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2154 tcg_temp_free_i32(tmp);
18c9b560
AZ
2155 gen_op_iwmmxt_movq_wRn_M0(wrd);
2156 gen_op_iwmmxt_set_mup();
2157 break;
2158 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2159 if (((insn >> 6) & 3) == 3)
2160 return 1;
18c9b560
AZ
2161 rd = (insn >> 12) & 0xf;
2162 wrd = (insn >> 16) & 0xf;
da6b5335 2163 tmp = load_reg(s, rd);
18c9b560
AZ
2164 gen_op_iwmmxt_movq_M0_wRn(wrd);
2165 switch ((insn >> 6) & 3) {
2166 case 0:
da6b5335
FN
2167 tmp2 = tcg_const_i32(0xff);
2168 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2169 break;
2170 case 1:
da6b5335
FN
2171 tmp2 = tcg_const_i32(0xffff);
2172 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2173 break;
2174 case 2:
da6b5335
FN
2175 tmp2 = tcg_const_i32(0xffffffff);
2176 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2177 break;
da6b5335 2178 default:
f764718d
RH
2179 tmp2 = NULL;
2180 tmp3 = NULL;
18c9b560 2181 }
da6b5335 2182 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2183 tcg_temp_free_i32(tmp3);
2184 tcg_temp_free_i32(tmp2);
7d1b0095 2185 tcg_temp_free_i32(tmp);
18c9b560
AZ
2186 gen_op_iwmmxt_movq_wRn_M0(wrd);
2187 gen_op_iwmmxt_set_mup();
2188 break;
2189 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2190 rd = (insn >> 12) & 0xf;
2191 wrd = (insn >> 16) & 0xf;
da6b5335 2192 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2193 return 1;
2194 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2195 tmp = tcg_temp_new_i32();
18c9b560
AZ
2196 switch ((insn >> 22) & 3) {
2197 case 0:
da6b5335 2198 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2199 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2200 if (insn & 8) {
2201 tcg_gen_ext8s_i32(tmp, tmp);
2202 } else {
2203 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2204 }
2205 break;
2206 case 1:
da6b5335 2207 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2208 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2209 if (insn & 8) {
2210 tcg_gen_ext16s_i32(tmp, tmp);
2211 } else {
2212 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2213 }
2214 break;
2215 case 2:
da6b5335 2216 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2217 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2218 break;
18c9b560 2219 }
da6b5335 2220 store_reg(s, rd, tmp);
18c9b560
AZ
2221 break;
2222 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2223 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2224 return 1;
da6b5335 2225 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2226 switch ((insn >> 22) & 3) {
2227 case 0:
da6b5335 2228 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2229 break;
2230 case 1:
da6b5335 2231 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2232 break;
2233 case 2:
da6b5335 2234 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2235 break;
18c9b560 2236 }
da6b5335
FN
2237 tcg_gen_shli_i32(tmp, tmp, 28);
2238 gen_set_nzcv(tmp);
7d1b0095 2239 tcg_temp_free_i32(tmp);
18c9b560
AZ
2240 break;
2241 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2242 if (((insn >> 6) & 3) == 3)
2243 return 1;
18c9b560
AZ
2244 rd = (insn >> 12) & 0xf;
2245 wrd = (insn >> 16) & 0xf;
da6b5335 2246 tmp = load_reg(s, rd);
18c9b560
AZ
2247 switch ((insn >> 6) & 3) {
2248 case 0:
da6b5335 2249 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2250 break;
2251 case 1:
da6b5335 2252 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2253 break;
2254 case 2:
da6b5335 2255 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2256 break;
18c9b560 2257 }
7d1b0095 2258 tcg_temp_free_i32(tmp);
18c9b560
AZ
2259 gen_op_iwmmxt_movq_wRn_M0(wrd);
2260 gen_op_iwmmxt_set_mup();
2261 break;
2262 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2263 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2264 return 1;
da6b5335 2265 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2266 tmp2 = tcg_temp_new_i32();
da6b5335 2267 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2268 switch ((insn >> 22) & 3) {
2269 case 0:
2270 for (i = 0; i < 7; i ++) {
da6b5335
FN
2271 tcg_gen_shli_i32(tmp2, tmp2, 4);
2272 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2273 }
2274 break;
2275 case 1:
2276 for (i = 0; i < 3; i ++) {
da6b5335
FN
2277 tcg_gen_shli_i32(tmp2, tmp2, 8);
2278 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2279 }
2280 break;
2281 case 2:
da6b5335
FN
2282 tcg_gen_shli_i32(tmp2, tmp2, 16);
2283 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2284 break;
18c9b560 2285 }
da6b5335 2286 gen_set_nzcv(tmp);
7d1b0095
PM
2287 tcg_temp_free_i32(tmp2);
2288 tcg_temp_free_i32(tmp);
18c9b560
AZ
2289 break;
2290 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2291 wrd = (insn >> 12) & 0xf;
2292 rd0 = (insn >> 16) & 0xf;
2293 gen_op_iwmmxt_movq_M0_wRn(rd0);
2294 switch ((insn >> 22) & 3) {
2295 case 0:
e677137d 2296 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2297 break;
2298 case 1:
e677137d 2299 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2300 break;
2301 case 2:
e677137d 2302 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2303 break;
2304 case 3:
2305 return 1;
2306 }
2307 gen_op_iwmmxt_movq_wRn_M0(wrd);
2308 gen_op_iwmmxt_set_mup();
2309 break;
2310 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2311 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2312 return 1;
da6b5335 2313 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2314 tmp2 = tcg_temp_new_i32();
da6b5335 2315 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2316 switch ((insn >> 22) & 3) {
2317 case 0:
2318 for (i = 0; i < 7; i ++) {
da6b5335
FN
2319 tcg_gen_shli_i32(tmp2, tmp2, 4);
2320 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2321 }
2322 break;
2323 case 1:
2324 for (i = 0; i < 3; i ++) {
da6b5335
FN
2325 tcg_gen_shli_i32(tmp2, tmp2, 8);
2326 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2327 }
2328 break;
2329 case 2:
da6b5335
FN
2330 tcg_gen_shli_i32(tmp2, tmp2, 16);
2331 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2332 break;
18c9b560 2333 }
da6b5335 2334 gen_set_nzcv(tmp);
7d1b0095
PM
2335 tcg_temp_free_i32(tmp2);
2336 tcg_temp_free_i32(tmp);
18c9b560
AZ
2337 break;
2338 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2339 rd = (insn >> 12) & 0xf;
2340 rd0 = (insn >> 16) & 0xf;
da6b5335 2341 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2342 return 1;
2343 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2344 tmp = tcg_temp_new_i32();
18c9b560
AZ
2345 switch ((insn >> 22) & 3) {
2346 case 0:
da6b5335 2347 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2348 break;
2349 case 1:
da6b5335 2350 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2351 break;
2352 case 2:
da6b5335 2353 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2354 break;
18c9b560 2355 }
da6b5335 2356 store_reg(s, rd, tmp);
18c9b560
AZ
2357 break;
2358 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2359 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2360 wrd = (insn >> 12) & 0xf;
2361 rd0 = (insn >> 16) & 0xf;
2362 rd1 = (insn >> 0) & 0xf;
2363 gen_op_iwmmxt_movq_M0_wRn(rd0);
2364 switch ((insn >> 22) & 3) {
2365 case 0:
2366 if (insn & (1 << 21))
2367 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2368 else
2369 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2370 break;
2371 case 1:
2372 if (insn & (1 << 21))
2373 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2374 else
2375 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2376 break;
2377 case 2:
2378 if (insn & (1 << 21))
2379 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2380 else
2381 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2382 break;
2383 case 3:
2384 return 1;
2385 }
2386 gen_op_iwmmxt_movq_wRn_M0(wrd);
2387 gen_op_iwmmxt_set_mup();
2388 gen_op_iwmmxt_set_cup();
2389 break;
2390 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2391 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2392 wrd = (insn >> 12) & 0xf;
2393 rd0 = (insn >> 16) & 0xf;
2394 gen_op_iwmmxt_movq_M0_wRn(rd0);
2395 switch ((insn >> 22) & 3) {
2396 case 0:
2397 if (insn & (1 << 21))
2398 gen_op_iwmmxt_unpacklsb_M0();
2399 else
2400 gen_op_iwmmxt_unpacklub_M0();
2401 break;
2402 case 1:
2403 if (insn & (1 << 21))
2404 gen_op_iwmmxt_unpacklsw_M0();
2405 else
2406 gen_op_iwmmxt_unpackluw_M0();
2407 break;
2408 case 2:
2409 if (insn & (1 << 21))
2410 gen_op_iwmmxt_unpacklsl_M0();
2411 else
2412 gen_op_iwmmxt_unpacklul_M0();
2413 break;
2414 case 3:
2415 return 1;
2416 }
2417 gen_op_iwmmxt_movq_wRn_M0(wrd);
2418 gen_op_iwmmxt_set_mup();
2419 gen_op_iwmmxt_set_cup();
2420 break;
2421 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2422 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2423 wrd = (insn >> 12) & 0xf;
2424 rd0 = (insn >> 16) & 0xf;
2425 gen_op_iwmmxt_movq_M0_wRn(rd0);
2426 switch ((insn >> 22) & 3) {
2427 case 0:
2428 if (insn & (1 << 21))
2429 gen_op_iwmmxt_unpackhsb_M0();
2430 else
2431 gen_op_iwmmxt_unpackhub_M0();
2432 break;
2433 case 1:
2434 if (insn & (1 << 21))
2435 gen_op_iwmmxt_unpackhsw_M0();
2436 else
2437 gen_op_iwmmxt_unpackhuw_M0();
2438 break;
2439 case 2:
2440 if (insn & (1 << 21))
2441 gen_op_iwmmxt_unpackhsl_M0();
2442 else
2443 gen_op_iwmmxt_unpackhul_M0();
2444 break;
2445 case 3:
2446 return 1;
2447 }
2448 gen_op_iwmmxt_movq_wRn_M0(wrd);
2449 gen_op_iwmmxt_set_mup();
2450 gen_op_iwmmxt_set_cup();
2451 break;
2452 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2453 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2454 if (((insn >> 22) & 3) == 0)
2455 return 1;
18c9b560
AZ
2456 wrd = (insn >> 12) & 0xf;
2457 rd0 = (insn >> 16) & 0xf;
2458 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2459 tmp = tcg_temp_new_i32();
da6b5335 2460 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2461 tcg_temp_free_i32(tmp);
18c9b560 2462 return 1;
da6b5335 2463 }
18c9b560 2464 switch ((insn >> 22) & 3) {
18c9b560 2465 case 1:
477955bd 2466 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2467 break;
2468 case 2:
477955bd 2469 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2470 break;
2471 case 3:
477955bd 2472 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2473 break;
2474 }
7d1b0095 2475 tcg_temp_free_i32(tmp);
18c9b560
AZ
2476 gen_op_iwmmxt_movq_wRn_M0(wrd);
2477 gen_op_iwmmxt_set_mup();
2478 gen_op_iwmmxt_set_cup();
2479 break;
2480 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2481 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2482 if (((insn >> 22) & 3) == 0)
2483 return 1;
18c9b560
AZ
2484 wrd = (insn >> 12) & 0xf;
2485 rd0 = (insn >> 16) & 0xf;
2486 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2487 tmp = tcg_temp_new_i32();
da6b5335 2488 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2489 tcg_temp_free_i32(tmp);
18c9b560 2490 return 1;
da6b5335 2491 }
18c9b560 2492 switch ((insn >> 22) & 3) {
18c9b560 2493 case 1:
477955bd 2494 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2495 break;
2496 case 2:
477955bd 2497 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2498 break;
2499 case 3:
477955bd 2500 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2501 break;
2502 }
7d1b0095 2503 tcg_temp_free_i32(tmp);
18c9b560
AZ
2504 gen_op_iwmmxt_movq_wRn_M0(wrd);
2505 gen_op_iwmmxt_set_mup();
2506 gen_op_iwmmxt_set_cup();
2507 break;
2508 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2509 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2510 if (((insn >> 22) & 3) == 0)
2511 return 1;
18c9b560
AZ
2512 wrd = (insn >> 12) & 0xf;
2513 rd0 = (insn >> 16) & 0xf;
2514 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2515 tmp = tcg_temp_new_i32();
da6b5335 2516 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2517 tcg_temp_free_i32(tmp);
18c9b560 2518 return 1;
da6b5335 2519 }
18c9b560 2520 switch ((insn >> 22) & 3) {
18c9b560 2521 case 1:
477955bd 2522 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2523 break;
2524 case 2:
477955bd 2525 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2526 break;
2527 case 3:
477955bd 2528 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2529 break;
2530 }
7d1b0095 2531 tcg_temp_free_i32(tmp);
18c9b560
AZ
2532 gen_op_iwmmxt_movq_wRn_M0(wrd);
2533 gen_op_iwmmxt_set_mup();
2534 gen_op_iwmmxt_set_cup();
2535 break;
2536 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2537 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2538 if (((insn >> 22) & 3) == 0)
2539 return 1;
18c9b560
AZ
2540 wrd = (insn >> 12) & 0xf;
2541 rd0 = (insn >> 16) & 0xf;
2542 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2543 tmp = tcg_temp_new_i32();
18c9b560 2544 switch ((insn >> 22) & 3) {
18c9b560 2545 case 1:
da6b5335 2546 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2547 tcg_temp_free_i32(tmp);
18c9b560 2548 return 1;
da6b5335 2549 }
477955bd 2550 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2551 break;
2552 case 2:
da6b5335 2553 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2554 tcg_temp_free_i32(tmp);
18c9b560 2555 return 1;
da6b5335 2556 }
477955bd 2557 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2558 break;
2559 case 3:
da6b5335 2560 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2561 tcg_temp_free_i32(tmp);
18c9b560 2562 return 1;
da6b5335 2563 }
477955bd 2564 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2565 break;
2566 }
7d1b0095 2567 tcg_temp_free_i32(tmp);
18c9b560
AZ
2568 gen_op_iwmmxt_movq_wRn_M0(wrd);
2569 gen_op_iwmmxt_set_mup();
2570 gen_op_iwmmxt_set_cup();
2571 break;
2572 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2573 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2574 wrd = (insn >> 12) & 0xf;
2575 rd0 = (insn >> 16) & 0xf;
2576 rd1 = (insn >> 0) & 0xf;
2577 gen_op_iwmmxt_movq_M0_wRn(rd0);
2578 switch ((insn >> 22) & 3) {
2579 case 0:
2580 if (insn & (1 << 21))
2581 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2582 else
2583 gen_op_iwmmxt_minub_M0_wRn(rd1);
2584 break;
2585 case 1:
2586 if (insn & (1 << 21))
2587 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2588 else
2589 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2590 break;
2591 case 2:
2592 if (insn & (1 << 21))
2593 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2594 else
2595 gen_op_iwmmxt_minul_M0_wRn(rd1);
2596 break;
2597 case 3:
2598 return 1;
2599 }
2600 gen_op_iwmmxt_movq_wRn_M0(wrd);
2601 gen_op_iwmmxt_set_mup();
2602 break;
2603 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2604 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2605 wrd = (insn >> 12) & 0xf;
2606 rd0 = (insn >> 16) & 0xf;
2607 rd1 = (insn >> 0) & 0xf;
2608 gen_op_iwmmxt_movq_M0_wRn(rd0);
2609 switch ((insn >> 22) & 3) {
2610 case 0:
2611 if (insn & (1 << 21))
2612 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2613 else
2614 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2615 break;
2616 case 1:
2617 if (insn & (1 << 21))
2618 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2619 else
2620 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2621 break;
2622 case 2:
2623 if (insn & (1 << 21))
2624 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2625 else
2626 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2627 break;
2628 case 3:
2629 return 1;
2630 }
2631 gen_op_iwmmxt_movq_wRn_M0(wrd);
2632 gen_op_iwmmxt_set_mup();
2633 break;
2634 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2635 case 0x402: case 0x502: case 0x602: case 0x702:
2636 wrd = (insn >> 12) & 0xf;
2637 rd0 = (insn >> 16) & 0xf;
2638 rd1 = (insn >> 0) & 0xf;
2639 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2640 tmp = tcg_const_i32((insn >> 20) & 3);
2641 iwmmxt_load_reg(cpu_V1, rd1);
2642 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2643 tcg_temp_free_i32(tmp);
18c9b560
AZ
2644 gen_op_iwmmxt_movq_wRn_M0(wrd);
2645 gen_op_iwmmxt_set_mup();
2646 break;
2647 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2648 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2649 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2650 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2651 wrd = (insn >> 12) & 0xf;
2652 rd0 = (insn >> 16) & 0xf;
2653 rd1 = (insn >> 0) & 0xf;
2654 gen_op_iwmmxt_movq_M0_wRn(rd0);
2655 switch ((insn >> 20) & 0xf) {
2656 case 0x0:
2657 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2658 break;
2659 case 0x1:
2660 gen_op_iwmmxt_subub_M0_wRn(rd1);
2661 break;
2662 case 0x3:
2663 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2664 break;
2665 case 0x4:
2666 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2667 break;
2668 case 0x5:
2669 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2670 break;
2671 case 0x7:
2672 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2673 break;
2674 case 0x8:
2675 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2676 break;
2677 case 0x9:
2678 gen_op_iwmmxt_subul_M0_wRn(rd1);
2679 break;
2680 case 0xb:
2681 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2682 break;
2683 default:
2684 return 1;
2685 }
2686 gen_op_iwmmxt_movq_wRn_M0(wrd);
2687 gen_op_iwmmxt_set_mup();
2688 gen_op_iwmmxt_set_cup();
2689 break;
2690 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2691 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2692 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2693 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2694 wrd = (insn >> 12) & 0xf;
2695 rd0 = (insn >> 16) & 0xf;
2696 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2697 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2698 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2699 tcg_temp_free_i32(tmp);
18c9b560
AZ
2700 gen_op_iwmmxt_movq_wRn_M0(wrd);
2701 gen_op_iwmmxt_set_mup();
2702 gen_op_iwmmxt_set_cup();
2703 break;
2704 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2705 case 0x418: case 0x518: case 0x618: case 0x718:
2706 case 0x818: case 0x918: case 0xa18: case 0xb18:
2707 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2708 wrd = (insn >> 12) & 0xf;
2709 rd0 = (insn >> 16) & 0xf;
2710 rd1 = (insn >> 0) & 0xf;
2711 gen_op_iwmmxt_movq_M0_wRn(rd0);
2712 switch ((insn >> 20) & 0xf) {
2713 case 0x0:
2714 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2715 break;
2716 case 0x1:
2717 gen_op_iwmmxt_addub_M0_wRn(rd1);
2718 break;
2719 case 0x3:
2720 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2721 break;
2722 case 0x4:
2723 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2724 break;
2725 case 0x5:
2726 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2727 break;
2728 case 0x7:
2729 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2730 break;
2731 case 0x8:
2732 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2733 break;
2734 case 0x9:
2735 gen_op_iwmmxt_addul_M0_wRn(rd1);
2736 break;
2737 case 0xb:
2738 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2739 break;
2740 default:
2741 return 1;
2742 }
2743 gen_op_iwmmxt_movq_wRn_M0(wrd);
2744 gen_op_iwmmxt_set_mup();
2745 gen_op_iwmmxt_set_cup();
2746 break;
2747 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2748 case 0x408: case 0x508: case 0x608: case 0x708:
2749 case 0x808: case 0x908: case 0xa08: case 0xb08:
2750 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2751 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2752 return 1;
18c9b560
AZ
2753 wrd = (insn >> 12) & 0xf;
2754 rd0 = (insn >> 16) & 0xf;
2755 rd1 = (insn >> 0) & 0xf;
2756 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2757 switch ((insn >> 22) & 3) {
18c9b560
AZ
2758 case 1:
2759 if (insn & (1 << 21))
2760 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2761 else
2762 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2763 break;
2764 case 2:
2765 if (insn & (1 << 21))
2766 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2767 else
2768 gen_op_iwmmxt_packul_M0_wRn(rd1);
2769 break;
2770 case 3:
2771 if (insn & (1 << 21))
2772 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2773 else
2774 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2775 break;
2776 }
2777 gen_op_iwmmxt_movq_wRn_M0(wrd);
2778 gen_op_iwmmxt_set_mup();
2779 gen_op_iwmmxt_set_cup();
2780 break;
2781 case 0x201: case 0x203: case 0x205: case 0x207:
2782 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2783 case 0x211: case 0x213: case 0x215: case 0x217:
2784 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2785 wrd = (insn >> 5) & 0xf;
2786 rd0 = (insn >> 12) & 0xf;
2787 rd1 = (insn >> 0) & 0xf;
2788 if (rd0 == 0xf || rd1 == 0xf)
2789 return 1;
2790 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2791 tmp = load_reg(s, rd0);
2792 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2793 switch ((insn >> 16) & 0xf) {
2794 case 0x0: /* TMIA */
da6b5335 2795 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2796 break;
2797 case 0x8: /* TMIAPH */
da6b5335 2798 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2799 break;
2800 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2801 if (insn & (1 << 16))
da6b5335 2802 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2803 if (insn & (1 << 17))
da6b5335
FN
2804 tcg_gen_shri_i32(tmp2, tmp2, 16);
2805 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2806 break;
2807 default:
7d1b0095
PM
2808 tcg_temp_free_i32(tmp2);
2809 tcg_temp_free_i32(tmp);
18c9b560
AZ
2810 return 1;
2811 }
7d1b0095
PM
2812 tcg_temp_free_i32(tmp2);
2813 tcg_temp_free_i32(tmp);
18c9b560
AZ
2814 gen_op_iwmmxt_movq_wRn_M0(wrd);
2815 gen_op_iwmmxt_set_mup();
2816 break;
2817 default:
2818 return 1;
2819 }
2820
2821 return 0;
2822}
2823
a1c7273b 2824/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2825 (ie. an undefined instruction). */
7dcc1f89 2826static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2827{
2828 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2829 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2830
2831 if ((insn & 0x0ff00f10) == 0x0e200010) {
2832 /* Multiply with Internal Accumulate Format */
2833 rd0 = (insn >> 12) & 0xf;
2834 rd1 = insn & 0xf;
2835 acc = (insn >> 5) & 7;
2836
2837 if (acc != 0)
2838 return 1;
2839
3a554c0f
FN
2840 tmp = load_reg(s, rd0);
2841 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2842 switch ((insn >> 16) & 0xf) {
2843 case 0x0: /* MIA */
3a554c0f 2844 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2845 break;
2846 case 0x8: /* MIAPH */
3a554c0f 2847 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2848 break;
2849 case 0xc: /* MIABB */
2850 case 0xd: /* MIABT */
2851 case 0xe: /* MIATB */
2852 case 0xf: /* MIATT */
18c9b560 2853 if (insn & (1 << 16))
3a554c0f 2854 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2855 if (insn & (1 << 17))
3a554c0f
FN
2856 tcg_gen_shri_i32(tmp2, tmp2, 16);
2857 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2858 break;
2859 default:
2860 return 1;
2861 }
7d1b0095
PM
2862 tcg_temp_free_i32(tmp2);
2863 tcg_temp_free_i32(tmp);
18c9b560
AZ
2864
2865 gen_op_iwmmxt_movq_wRn_M0(acc);
2866 return 0;
2867 }
2868
2869 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2870 /* Internal Accumulator Access Format */
2871 rdhi = (insn >> 16) & 0xf;
2872 rdlo = (insn >> 12) & 0xf;
2873 acc = insn & 7;
2874
2875 if (acc != 0)
2876 return 1;
2877
2878 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2879 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2880 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2881 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2882 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2883 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2884 } else { /* MAR */
3a554c0f
FN
2885 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2886 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2887 }
2888 return 0;
2889 }
2890
2891 return 1;
2892}
2893
/* Extract VFP register number fields from an instruction word.
 * VFP_REG_SHR shifts right by n, or left by -n when n is negative
 * (used so the same expression works for bit positions below 1).  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision register number: 4-bit field at 'bigbit' is the
 * high bits, single bit at 'smallbit' is the low bit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number: on VFP3+ the 'smallbit' bit is the
 * high (5th) bit of the register number; pre-VFP3 only 16 D registers
 * exist, so a set 'smallbit' makes the containing decoder return 1
 * (UNDEF).  Note this macro can 'return' from the enclosing function.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M operand registers.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2913
4373f3ce 2914/* Move between integer and VFP cores. */
39d5492a 2915static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2916{
39d5492a 2917 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2918 tcg_gen_mov_i32(tmp, cpu_F0s);
2919 return tmp;
2920}
2921
39d5492a 2922static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2923{
2924 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2925 tcg_temp_free_i32(tmp);
4373f3ce
PB
2926}
2927
39d5492a 2928static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2929{
39d5492a 2930 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2931 if (shift)
2932 tcg_gen_shri_i32(var, var, shift);
86831435 2933 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2934 tcg_gen_shli_i32(tmp, var, 8);
2935 tcg_gen_or_i32(var, var, tmp);
2936 tcg_gen_shli_i32(tmp, var, 16);
2937 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2938 tcg_temp_free_i32(tmp);
ad69471c
PB
2939}
2940
39d5492a 2941static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2942{
39d5492a 2943 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2944 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2945 tcg_gen_shli_i32(tmp, var, 16);
2946 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2947 tcg_temp_free_i32(tmp);
ad69471c
PB
2948}
2949
39d5492a 2950static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2951{
39d5492a 2952 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2953 tcg_gen_andi_i32(var, var, 0xffff0000);
2954 tcg_gen_shri_i32(tmp, var, 16);
2955 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2956 tcg_temp_free_i32(tmp);
ad69471c
PB
2957}
2958
39d5492a 2959static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2960{
2961 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2962 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2963 switch (size) {
2964 case 0:
12dcc321 2965 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2966 gen_neon_dup_u8(tmp, 0);
2967 break;
2968 case 1:
12dcc321 2969 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2970 gen_neon_dup_low16(tmp);
2971 break;
2972 case 2:
12dcc321 2973 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2974 break;
2975 default: /* Avoid compiler warnings. */
2976 abort();
2977 }
2978 return tmp;
2979}
2980
04731fb5
WN
2981static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2982 uint32_t dp)
2983{
2984 uint32_t cc = extract32(insn, 20, 2);
2985
2986 if (dp) {
2987 TCGv_i64 frn, frm, dest;
2988 TCGv_i64 tmp, zero, zf, nf, vf;
2989
2990 zero = tcg_const_i64(0);
2991
2992 frn = tcg_temp_new_i64();
2993 frm = tcg_temp_new_i64();
2994 dest = tcg_temp_new_i64();
2995
2996 zf = tcg_temp_new_i64();
2997 nf = tcg_temp_new_i64();
2998 vf = tcg_temp_new_i64();
2999
3000 tcg_gen_extu_i32_i64(zf, cpu_ZF);
3001 tcg_gen_ext_i32_i64(nf, cpu_NF);
3002 tcg_gen_ext_i32_i64(vf, cpu_VF);
3003
3004 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3005 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3006 switch (cc) {
3007 case 0: /* eq: Z */
3008 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
3009 frn, frm);
3010 break;
3011 case 1: /* vs: V */
3012 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
3013 frn, frm);
3014 break;
3015 case 2: /* ge: N == V -> N ^ V == 0 */
3016 tmp = tcg_temp_new_i64();
3017 tcg_gen_xor_i64(tmp, vf, nf);
3018 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3019 frn, frm);
3020 tcg_temp_free_i64(tmp);
3021 break;
3022 case 3: /* gt: !Z && N == V */
3023 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
3024 frn, frm);
3025 tmp = tcg_temp_new_i64();
3026 tcg_gen_xor_i64(tmp, vf, nf);
3027 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
3028 dest, frm);
3029 tcg_temp_free_i64(tmp);
3030 break;
3031 }
3032 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3033 tcg_temp_free_i64(frn);
3034 tcg_temp_free_i64(frm);
3035 tcg_temp_free_i64(dest);
3036
3037 tcg_temp_free_i64(zf);
3038 tcg_temp_free_i64(nf);
3039 tcg_temp_free_i64(vf);
3040
3041 tcg_temp_free_i64(zero);
3042 } else {
3043 TCGv_i32 frn, frm, dest;
3044 TCGv_i32 tmp, zero;
3045
3046 zero = tcg_const_i32(0);
3047
3048 frn = tcg_temp_new_i32();
3049 frm = tcg_temp_new_i32();
3050 dest = tcg_temp_new_i32();
3051 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3052 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3053 switch (cc) {
3054 case 0: /* eq: Z */
3055 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
3056 frn, frm);
3057 break;
3058 case 1: /* vs: V */
3059 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
3060 frn, frm);
3061 break;
3062 case 2: /* ge: N == V -> N ^ V == 0 */
3063 tmp = tcg_temp_new_i32();
3064 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3065 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3066 frn, frm);
3067 tcg_temp_free_i32(tmp);
3068 break;
3069 case 3: /* gt: !Z && N == V */
3070 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
3071 frn, frm);
3072 tmp = tcg_temp_new_i32();
3073 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
3074 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
3075 dest, frm);
3076 tcg_temp_free_i32(tmp);
3077 break;
3078 }
3079 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3080 tcg_temp_free_i32(frn);
3081 tcg_temp_free_i32(frm);
3082 tcg_temp_free_i32(dest);
3083
3084 tcg_temp_free_i32(zero);
3085 }
3086
3087 return 0;
3088}
3089
40cfacdd
WN
3090static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3091 uint32_t rm, uint32_t dp)
3092{
3093 uint32_t vmin = extract32(insn, 6, 1);
3094 TCGv_ptr fpst = get_fpstatus_ptr(0);
3095
3096 if (dp) {
3097 TCGv_i64 frn, frm, dest;
3098
3099 frn = tcg_temp_new_i64();
3100 frm = tcg_temp_new_i64();
3101 dest = tcg_temp_new_i64();
3102
3103 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3104 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3105 if (vmin) {
f71a2ae5 3106 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3107 } else {
f71a2ae5 3108 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3109 }
3110 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3111 tcg_temp_free_i64(frn);
3112 tcg_temp_free_i64(frm);
3113 tcg_temp_free_i64(dest);
3114 } else {
3115 TCGv_i32 frn, frm, dest;
3116
3117 frn = tcg_temp_new_i32();
3118 frm = tcg_temp_new_i32();
3119 dest = tcg_temp_new_i32();
3120
3121 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3122 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3123 if (vmin) {
f71a2ae5 3124 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3125 } else {
f71a2ae5 3126 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3127 }
3128 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3129 tcg_temp_free_i32(frn);
3130 tcg_temp_free_i32(frm);
3131 tcg_temp_free_i32(dest);
3132 }
3133
3134 tcg_temp_free_ptr(fpst);
3135 return 0;
3136}
3137
7655f39b
WN
3138static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3139 int rounding)
3140{
3141 TCGv_ptr fpst = get_fpstatus_ptr(0);
3142 TCGv_i32 tcg_rmode;
3143
3144 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3145 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3146
3147 if (dp) {
3148 TCGv_i64 tcg_op;
3149 TCGv_i64 tcg_res;
3150 tcg_op = tcg_temp_new_i64();
3151 tcg_res = tcg_temp_new_i64();
3152 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3153 gen_helper_rintd(tcg_res, tcg_op, fpst);
3154 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3155 tcg_temp_free_i64(tcg_op);
3156 tcg_temp_free_i64(tcg_res);
3157 } else {
3158 TCGv_i32 tcg_op;
3159 TCGv_i32 tcg_res;
3160 tcg_op = tcg_temp_new_i32();
3161 tcg_res = tcg_temp_new_i32();
3162 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3163 gen_helper_rints(tcg_res, tcg_op, fpst);
3164 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3165 tcg_temp_free_i32(tcg_op);
3166 tcg_temp_free_i32(tcg_res);
3167 }
3168
3169 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3170 tcg_temp_free_i32(tcg_rmode);
3171
3172 tcg_temp_free_ptr(fpst);
3173 return 0;
3174}
3175
c9975a83
WN
3176static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3177 int rounding)
3178{
3179 bool is_signed = extract32(insn, 7, 1);
3180 TCGv_ptr fpst = get_fpstatus_ptr(0);
3181 TCGv_i32 tcg_rmode, tcg_shift;
3182
3183 tcg_shift = tcg_const_i32(0);
3184
3185 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3186 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3187
3188 if (dp) {
3189 TCGv_i64 tcg_double, tcg_res;
3190 TCGv_i32 tcg_tmp;
3191 /* Rd is encoded as a single precision register even when the source
3192 * is double precision.
3193 */
3194 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3195 tcg_double = tcg_temp_new_i64();
3196 tcg_res = tcg_temp_new_i64();
3197 tcg_tmp = tcg_temp_new_i32();
3198 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3199 if (is_signed) {
3200 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3201 } else {
3202 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3203 }
ecc7b3aa 3204 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3205 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3206 tcg_temp_free_i32(tcg_tmp);
3207 tcg_temp_free_i64(tcg_res);
3208 tcg_temp_free_i64(tcg_double);
3209 } else {
3210 TCGv_i32 tcg_single, tcg_res;
3211 tcg_single = tcg_temp_new_i32();
3212 tcg_res = tcg_temp_new_i32();
3213 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3214 if (is_signed) {
3215 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3216 } else {
3217 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3218 }
3219 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3220 tcg_temp_free_i32(tcg_res);
3221 tcg_temp_free_i32(tcg_single);
3222 }
3223
3224 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3225 tcg_temp_free_i32(tcg_rmode);
3226
3227 tcg_temp_free_i32(tcg_shift);
3228
3229 tcg_temp_free_ptr(fpst);
3230
3231 return 0;
3232}
7655f39b
WN
3233
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY, /* RM field 0b00 */
    FPROUNDING_TIEEVEN, /* RM field 0b01 */
    FPROUNDING_POSINF,  /* RM field 0b10 */
    FPROUNDING_NEGINF,  /* RM field 0b11 */
};
3244
7dcc1f89 3245static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3246{
3247 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3248
d614a513 3249 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3250 return 1;
3251 }
3252
3253 if (dp) {
3254 VFP_DREG_D(rd, insn);
3255 VFP_DREG_N(rn, insn);
3256 VFP_DREG_M(rm, insn);
3257 } else {
3258 rd = VFP_SREG_D(insn);
3259 rn = VFP_SREG_N(insn);
3260 rm = VFP_SREG_M(insn);
3261 }
3262
3263 if ((insn & 0x0f800e50) == 0x0e000a00) {
3264 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3265 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3266 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3267 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3268 /* VRINTA, VRINTN, VRINTP, VRINTM */
3269 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3270 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3271 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3272 /* VCVTA, VCVTN, VCVTP, VCVTM */
3273 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3274 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3275 }
3276 return 1;
3277}
3278
a1c7273b 3279/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3280 (ie. an undefined instruction). */
7dcc1f89 3281static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3282{
3283 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3284 int dp, veclen;
39d5492a
PM
3285 TCGv_i32 addr;
3286 TCGv_i32 tmp;
3287 TCGv_i32 tmp2;
b7bcbe95 3288
d614a513 3289 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3290 return 1;
d614a513 3291 }
40f137e1 3292
2c7ffc41
PM
3293 /* FIXME: this access check should not take precedence over UNDEF
3294 * for invalid encodings; we will generate incorrect syndrome information
3295 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3296 */
9dbbc748 3297 if (s->fp_excp_el) {
2c7ffc41 3298 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3299 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3300 return 0;
3301 }
3302
5df8bac1 3303 if (!s->vfp_enabled) {
9ee6e8bb 3304 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3305 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3306 return 1;
3307 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3308 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3309 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3310 return 1;
a50c0f51 3311 }
40f137e1 3312 }
6a57f3eb
WN
3313
3314 if (extract32(insn, 28, 4) == 0xf) {
3315 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3316 * only used in v8 and above.
3317 */
7dcc1f89 3318 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3319 }
3320
b7bcbe95
FB
3321 dp = ((insn & 0xf00) == 0xb00);
3322 switch ((insn >> 24) & 0xf) {
3323 case 0xe:
3324 if (insn & (1 << 4)) {
3325 /* single register transfer */
b7bcbe95
FB
3326 rd = (insn >> 12) & 0xf;
3327 if (dp) {
9ee6e8bb
PB
3328 int size;
3329 int pass;
3330
3331 VFP_DREG_N(rn, insn);
3332 if (insn & 0xf)
b7bcbe95 3333 return 1;
9ee6e8bb 3334 if (insn & 0x00c00060
d614a513 3335 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3336 return 1;
d614a513 3337 }
9ee6e8bb
PB
3338
3339 pass = (insn >> 21) & 1;
3340 if (insn & (1 << 22)) {
3341 size = 0;
3342 offset = ((insn >> 5) & 3) * 8;
3343 } else if (insn & (1 << 5)) {
3344 size = 1;
3345 offset = (insn & (1 << 6)) ? 16 : 0;
3346 } else {
3347 size = 2;
3348 offset = 0;
3349 }
18c9b560 3350 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3351 /* vfp->arm */
ad69471c 3352 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3353 switch (size) {
3354 case 0:
9ee6e8bb 3355 if (offset)
ad69471c 3356 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3357 if (insn & (1 << 23))
ad69471c 3358 gen_uxtb(tmp);
9ee6e8bb 3359 else
ad69471c 3360 gen_sxtb(tmp);
9ee6e8bb
PB
3361 break;
3362 case 1:
9ee6e8bb
PB
3363 if (insn & (1 << 23)) {
3364 if (offset) {
ad69471c 3365 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3366 } else {
ad69471c 3367 gen_uxth(tmp);
9ee6e8bb
PB
3368 }
3369 } else {
3370 if (offset) {
ad69471c 3371 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3372 } else {
ad69471c 3373 gen_sxth(tmp);
9ee6e8bb
PB
3374 }
3375 }
3376 break;
3377 case 2:
9ee6e8bb
PB
3378 break;
3379 }
ad69471c 3380 store_reg(s, rd, tmp);
b7bcbe95
FB
3381 } else {
3382 /* arm->vfp */
ad69471c 3383 tmp = load_reg(s, rd);
9ee6e8bb
PB
3384 if (insn & (1 << 23)) {
3385 /* VDUP */
3386 if (size == 0) {
ad69471c 3387 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3388 } else if (size == 1) {
ad69471c 3389 gen_neon_dup_low16(tmp);
9ee6e8bb 3390 }
cbbccffc 3391 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3392 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3393 tcg_gen_mov_i32(tmp2, tmp);
3394 neon_store_reg(rn, n, tmp2);
3395 }
3396 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3397 } else {
3398 /* VMOV */
3399 switch (size) {
3400 case 0:
ad69471c 3401 tmp2 = neon_load_reg(rn, pass);
d593c48e 3402 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3403 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3404 break;
3405 case 1:
ad69471c 3406 tmp2 = neon_load_reg(rn, pass);
d593c48e 3407 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3408 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3409 break;
3410 case 2:
9ee6e8bb
PB
3411 break;
3412 }
ad69471c 3413 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3414 }
b7bcbe95 3415 }
9ee6e8bb
PB
3416 } else { /* !dp */
3417 if ((insn & 0x6f) != 0x00)
3418 return 1;
3419 rn = VFP_SREG_N(insn);
18c9b560 3420 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3421 /* vfp->arm */
3422 if (insn & (1 << 21)) {
3423 /* system register */
40f137e1 3424 rn >>= 1;
9ee6e8bb 3425
b7bcbe95 3426 switch (rn) {
40f137e1 3427 case ARM_VFP_FPSID:
4373f3ce 3428 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3429 VFP3 restricts all id registers to privileged
3430 accesses. */
3431 if (IS_USER(s)
d614a513 3432 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3433 return 1;
d614a513 3434 }
4373f3ce 3435 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3436 break;
40f137e1 3437 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3438 if (IS_USER(s))
3439 return 1;
4373f3ce 3440 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3441 break;
40f137e1
PB
3442 case ARM_VFP_FPINST:
3443 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3444 /* Not present in VFP3. */
3445 if (IS_USER(s)
d614a513 3446 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3447 return 1;
d614a513 3448 }
4373f3ce 3449 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3450 break;
40f137e1 3451 case ARM_VFP_FPSCR:
601d70b9 3452 if (rd == 15) {
4373f3ce
PB
3453 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3454 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3455 } else {
7d1b0095 3456 tmp = tcg_temp_new_i32();
4373f3ce
PB
3457 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3458 }
b7bcbe95 3459 break;
a50c0f51 3460 case ARM_VFP_MVFR2:
d614a513 3461 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3462 return 1;
3463 }
3464 /* fall through */
9ee6e8bb
PB
3465 case ARM_VFP_MVFR0:
3466 case ARM_VFP_MVFR1:
3467 if (IS_USER(s)
d614a513 3468 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3469 return 1;
d614a513 3470 }
4373f3ce 3471 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3472 break;
b7bcbe95
FB
3473 default:
3474 return 1;
3475 }
3476 } else {
3477 gen_mov_F0_vreg(0, rn);
4373f3ce 3478 tmp = gen_vfp_mrs();
b7bcbe95
FB
3479 }
3480 if (rd == 15) {
b5ff1b31 3481 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3482 gen_set_nzcv(tmp);
7d1b0095 3483 tcg_temp_free_i32(tmp);
4373f3ce
PB
3484 } else {
3485 store_reg(s, rd, tmp);
3486 }
b7bcbe95
FB
3487 } else {
3488 /* arm->vfp */
b7bcbe95 3489 if (insn & (1 << 21)) {
40f137e1 3490 rn >>= 1;
b7bcbe95
FB
3491 /* system register */
3492 switch (rn) {
40f137e1 3493 case ARM_VFP_FPSID:
9ee6e8bb
PB
3494 case ARM_VFP_MVFR0:
3495 case ARM_VFP_MVFR1:
b7bcbe95
FB
3496 /* Writes are ignored. */
3497 break;
40f137e1 3498 case ARM_VFP_FPSCR:
e4c1cfa5 3499 tmp = load_reg(s, rd);
4373f3ce 3500 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3501 tcg_temp_free_i32(tmp);
b5ff1b31 3502 gen_lookup_tb(s);
b7bcbe95 3503 break;
40f137e1 3504 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3505 if (IS_USER(s))
3506 return 1;
71b3c3de
JR
3507 /* TODO: VFP subarchitecture support.
3508 * For now, keep the EN bit only */
e4c1cfa5 3509 tmp = load_reg(s, rd);
71b3c3de 3510 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3511 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3512 gen_lookup_tb(s);
3513 break;
3514 case ARM_VFP_FPINST:
3515 case ARM_VFP_FPINST2:
23adb861
PM
3516 if (IS_USER(s)) {
3517 return 1;
3518 }
e4c1cfa5 3519 tmp = load_reg(s, rd);
4373f3ce 3520 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3521 break;
b7bcbe95
FB
3522 default:
3523 return 1;
3524 }
3525 } else {
e4c1cfa5 3526 tmp = load_reg(s, rd);
4373f3ce 3527 gen_vfp_msr(tmp);
b7bcbe95
FB
3528 gen_mov_vreg_F0(0, rn);
3529 }
3530 }
3531 }
3532 } else {
3533 /* data processing */
3534 /* The opcode is in bits 23, 21, 20 and 6. */
3535 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3536 if (dp) {
3537 if (op == 15) {
3538 /* rn is opcode */
3539 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3540 } else {
3541 /* rn is register number */
9ee6e8bb 3542 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3543 }
3544
239c20c7
WN
3545 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3546 ((rn & 0x1e) == 0x6))) {
3547 /* Integer or single/half precision destination. */
9ee6e8bb 3548 rd = VFP_SREG_D(insn);
b7bcbe95 3549 } else {
9ee6e8bb 3550 VFP_DREG_D(rd, insn);
b7bcbe95 3551 }
04595bf6 3552 if (op == 15 &&
239c20c7
WN
3553 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3554 ((rn & 0x1e) == 0x4))) {
3555 /* VCVT from int or half precision is always from S reg
3556 * regardless of dp bit. VCVT with immediate frac_bits
3557 * has same format as SREG_M.
04595bf6
PM
3558 */
3559 rm = VFP_SREG_M(insn);
b7bcbe95 3560 } else {
9ee6e8bb 3561 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3562 }
3563 } else {
9ee6e8bb 3564 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3565 if (op == 15 && rn == 15) {
3566 /* Double precision destination. */
9ee6e8bb
PB
3567 VFP_DREG_D(rd, insn);
3568 } else {
3569 rd = VFP_SREG_D(insn);
3570 }
04595bf6
PM
3571 /* NB that we implicitly rely on the encoding for the frac_bits
3572 * in VCVT of fixed to float being the same as that of an SREG_M
3573 */
9ee6e8bb 3574 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3575 }
3576
69d1fc22 3577 veclen = s->vec_len;
b7bcbe95
FB
3578 if (op == 15 && rn > 3)
3579 veclen = 0;
3580
3581 /* Shut up compiler warnings. */
3582 delta_m = 0;
3583 delta_d = 0;
3584 bank_mask = 0;
3b46e624 3585
b7bcbe95
FB
3586 if (veclen > 0) {
3587 if (dp)
3588 bank_mask = 0xc;
3589 else
3590 bank_mask = 0x18;
3591
3592 /* Figure out what type of vector operation this is. */
3593 if ((rd & bank_mask) == 0) {
3594 /* scalar */
3595 veclen = 0;
3596 } else {
3597 if (dp)
69d1fc22 3598 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3599 else
69d1fc22 3600 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3601
3602 if ((rm & bank_mask) == 0) {
3603 /* mixed scalar/vector */
3604 delta_m = 0;
3605 } else {
3606 /* vector */
3607 delta_m = delta_d;
3608 }
3609 }
3610 }
3611
3612 /* Load the initial operands. */
3613 if (op == 15) {
3614 switch (rn) {
3615 case 16:
3616 case 17:
3617 /* Integer source */
3618 gen_mov_F0_vreg(0, rm);
3619 break;
3620 case 8:
3621 case 9:
3622 /* Compare */
3623 gen_mov_F0_vreg(dp, rd);
3624 gen_mov_F1_vreg(dp, rm);
3625 break;
3626 case 10:
3627 case 11:
3628 /* Compare with zero */
3629 gen_mov_F0_vreg(dp, rd);
3630 gen_vfp_F1_ld0(dp);
3631 break;
9ee6e8bb
PB
3632 case 20:
3633 case 21:
3634 case 22:
3635 case 23:
644ad806
PB
3636 case 28:
3637 case 29:
3638 case 30:
3639 case 31:
9ee6e8bb
PB
3640 /* Source and destination the same. */
3641 gen_mov_F0_vreg(dp, rd);
3642 break;
6e0c0ed1
PM
3643 case 4:
3644 case 5:
3645 case 6:
3646 case 7:
239c20c7
WN
3647 /* VCVTB, VCVTT: only present with the halfprec extension
3648 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3649 * (we choose to UNDEF)
6e0c0ed1 3650 */
d614a513
PM
3651 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3652 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3653 return 1;
3654 }
239c20c7
WN
3655 if (!extract32(rn, 1, 1)) {
3656 /* Half precision source. */
3657 gen_mov_F0_vreg(0, rm);
3658 break;
3659 }
6e0c0ed1 3660 /* Otherwise fall through */
b7bcbe95
FB
3661 default:
3662 /* One source operand. */
3663 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3664 break;
b7bcbe95
FB
3665 }
3666 } else {
3667 /* Two source operands. */
3668 gen_mov_F0_vreg(dp, rn);
3669 gen_mov_F1_vreg(dp, rm);
3670 }
3671
3672 for (;;) {
3673 /* Perform the calculation. */
3674 switch (op) {
605a6aed
PM
3675 case 0: /* VMLA: fd + (fn * fm) */
3676 /* Note that order of inputs to the add matters for NaNs */
3677 gen_vfp_F1_mul(dp);
3678 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3679 gen_vfp_add(dp);
3680 break;
605a6aed 3681 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3682 gen_vfp_mul(dp);
605a6aed
PM
3683 gen_vfp_F1_neg(dp);
3684 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3685 gen_vfp_add(dp);
3686 break;
605a6aed
PM
3687 case 2: /* VNMLS: -fd + (fn * fm) */
3688 /* Note that it isn't valid to replace (-A + B) with (B - A)
3689 * or similar plausible looking simplifications
3690 * because this will give wrong results for NaNs.
3691 */
3692 gen_vfp_F1_mul(dp);
3693 gen_mov_F0_vreg(dp, rd);
3694 gen_vfp_neg(dp);
3695 gen_vfp_add(dp);
b7bcbe95 3696 break;
605a6aed 3697 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3698 gen_vfp_mul(dp);
605a6aed
PM
3699 gen_vfp_F1_neg(dp);
3700 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3701 gen_vfp_neg(dp);
605a6aed 3702 gen_vfp_add(dp);
b7bcbe95
FB
3703 break;
3704 case 4: /* mul: fn * fm */
3705 gen_vfp_mul(dp);
3706 break;
3707 case 5: /* nmul: -(fn * fm) */
3708 gen_vfp_mul(dp);
3709 gen_vfp_neg(dp);
3710 break;
3711 case 6: /* add: fn + fm */
3712 gen_vfp_add(dp);
3713 break;
3714 case 7: /* sub: fn - fm */
3715 gen_vfp_sub(dp);
3716 break;
3717 case 8: /* div: fn / fm */
3718 gen_vfp_div(dp);
3719 break;
da97f52c
PM
3720 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3721 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3722 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3723 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3724 /* These are fused multiply-add, and must be done as one
3725 * floating point operation with no rounding between the
3726 * multiplication and addition steps.
3727 * NB that doing the negations here as separate steps is
3728 * correct : an input NaN should come out with its sign bit
3729 * flipped if it is a negated-input.
3730 */
d614a513 3731 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3732 return 1;
3733 }
3734 if (dp) {
3735 TCGv_ptr fpst;
3736 TCGv_i64 frd;
3737 if (op & 1) {
3738 /* VFNMS, VFMS */
3739 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3740 }
3741 frd = tcg_temp_new_i64();
3742 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3743 if (op & 2) {
3744 /* VFNMA, VFNMS */
3745 gen_helper_vfp_negd(frd, frd);
3746 }
3747 fpst = get_fpstatus_ptr(0);
3748 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3749 cpu_F1d, frd, fpst);
3750 tcg_temp_free_ptr(fpst);
3751 tcg_temp_free_i64(frd);
3752 } else {
3753 TCGv_ptr fpst;
3754 TCGv_i32 frd;
3755 if (op & 1) {
3756 /* VFNMS, VFMS */
3757 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3758 }
3759 frd = tcg_temp_new_i32();
3760 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3761 if (op & 2) {
3762 gen_helper_vfp_negs(frd, frd);
3763 }
3764 fpst = get_fpstatus_ptr(0);
3765 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3766 cpu_F1s, frd, fpst);
3767 tcg_temp_free_ptr(fpst);
3768 tcg_temp_free_i32(frd);
3769 }
3770 break;
9ee6e8bb 3771 case 14: /* fconst */
d614a513
PM
3772 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3773 return 1;
3774 }
9ee6e8bb
PB
3775
3776 n = (insn << 12) & 0x80000000;
3777 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3778 if (dp) {
3779 if (i & 0x40)
3780 i |= 0x3f80;
3781 else
3782 i |= 0x4000;
3783 n |= i << 16;
4373f3ce 3784 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3785 } else {
3786 if (i & 0x40)
3787 i |= 0x780;
3788 else
3789 i |= 0x800;
3790 n |= i << 19;
5b340b51 3791 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3792 }
9ee6e8bb 3793 break;
b7bcbe95
FB
3794 case 15: /* extension space */
3795 switch (rn) {
3796 case 0: /* cpy */
3797 /* no-op */
3798 break;
3799 case 1: /* abs */
3800 gen_vfp_abs(dp);
3801 break;
3802 case 2: /* neg */
3803 gen_vfp_neg(dp);
3804 break;
3805 case 3: /* sqrt */
3806 gen_vfp_sqrt(dp);
3807 break;
239c20c7 3808 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3809 tmp = gen_vfp_mrs();
3810 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3811 if (dp) {
3812 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3813 cpu_env);
3814 } else {
3815 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3816 cpu_env);
3817 }
7d1b0095 3818 tcg_temp_free_i32(tmp);
60011498 3819 break;
239c20c7 3820 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3821 tmp = gen_vfp_mrs();
3822 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3823 if (dp) {
3824 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3825 cpu_env);
3826 } else {
3827 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3828 cpu_env);
3829 }
7d1b0095 3830 tcg_temp_free_i32(tmp);
60011498 3831 break;
239c20c7 3832 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3833 tmp = tcg_temp_new_i32();
239c20c7
WN
3834 if (dp) {
3835 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3836 cpu_env);
3837 } else {
3838 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3839 cpu_env);
3840 }
60011498
PB
3841 gen_mov_F0_vreg(0, rd);
3842 tmp2 = gen_vfp_mrs();
3843 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3844 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3845 tcg_temp_free_i32(tmp2);
60011498
PB
3846 gen_vfp_msr(tmp);
3847 break;
239c20c7 3848 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3849 tmp = tcg_temp_new_i32();
239c20c7
WN
3850 if (dp) {
3851 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3852 cpu_env);
3853 } else {
3854 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3855 cpu_env);
3856 }
60011498
PB
3857 tcg_gen_shli_i32(tmp, tmp, 16);
3858 gen_mov_F0_vreg(0, rd);
3859 tmp2 = gen_vfp_mrs();
3860 tcg_gen_ext16u_i32(tmp2, tmp2);
3861 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3862 tcg_temp_free_i32(tmp2);
60011498
PB
3863 gen_vfp_msr(tmp);
3864 break;
b7bcbe95
FB
3865 case 8: /* cmp */
3866 gen_vfp_cmp(dp);
3867 break;
3868 case 9: /* cmpe */
3869 gen_vfp_cmpe(dp);
3870 break;
3871 case 10: /* cmpz */
3872 gen_vfp_cmp(dp);
3873 break;
3874 case 11: /* cmpez */
3875 gen_vfp_F1_ld0(dp);
3876 gen_vfp_cmpe(dp);
3877 break;
664c6733
WN
3878 case 12: /* vrintr */
3879 {
3880 TCGv_ptr fpst = get_fpstatus_ptr(0);
3881 if (dp) {
3882 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3883 } else {
3884 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3885 }
3886 tcg_temp_free_ptr(fpst);
3887 break;
3888 }
a290c62a
WN
3889 case 13: /* vrintz */
3890 {
3891 TCGv_ptr fpst = get_fpstatus_ptr(0);
3892 TCGv_i32 tcg_rmode;
3893 tcg_rmode = tcg_const_i32(float_round_to_zero);
3894 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3895 if (dp) {
3896 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3897 } else {
3898 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3899 }
3900 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3901 tcg_temp_free_i32(tcg_rmode);
3902 tcg_temp_free_ptr(fpst);
3903 break;
3904 }
4e82bc01
WN
3905 case 14: /* vrintx */
3906 {
3907 TCGv_ptr fpst = get_fpstatus_ptr(0);
3908 if (dp) {
3909 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3910 } else {
3911 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3912 }
3913 tcg_temp_free_ptr(fpst);
3914 break;
3915 }
b7bcbe95
FB
3916 case 15: /* single<->double conversion */
3917 if (dp)
4373f3ce 3918 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3919 else
4373f3ce 3920 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3921 break;
3922 case 16: /* fuito */
5500b06c 3923 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3924 break;
3925 case 17: /* fsito */
5500b06c 3926 gen_vfp_sito(dp, 0);
b7bcbe95 3927 break;
9ee6e8bb 3928 case 20: /* fshto */
d614a513
PM
3929 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3930 return 1;
3931 }
5500b06c 3932 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3933 break;
3934 case 21: /* fslto */
d614a513
PM
3935 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3936 return 1;
3937 }
5500b06c 3938 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3939 break;
3940 case 22: /* fuhto */
d614a513
PM
3941 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3942 return 1;
3943 }
5500b06c 3944 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3945 break;
3946 case 23: /* fulto */
d614a513
PM
3947 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3948 return 1;
3949 }
5500b06c 3950 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3951 break;
b7bcbe95 3952 case 24: /* ftoui */
5500b06c 3953 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3954 break;
3955 case 25: /* ftouiz */
5500b06c 3956 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3957 break;
3958 case 26: /* ftosi */
5500b06c 3959 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3960 break;
3961 case 27: /* ftosiz */
5500b06c 3962 gen_vfp_tosiz(dp, 0);
b7bcbe95 3963 break;
9ee6e8bb 3964 case 28: /* ftosh */
d614a513
PM
3965 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3966 return 1;
3967 }
5500b06c 3968 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3969 break;
3970 case 29: /* ftosl */
d614a513
PM
3971 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3972 return 1;
3973 }
5500b06c 3974 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3975 break;
3976 case 30: /* ftouh */
d614a513
PM
3977 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3978 return 1;
3979 }
5500b06c 3980 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3981 break;
3982 case 31: /* ftoul */
d614a513
PM
3983 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3984 return 1;
3985 }
5500b06c 3986 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3987 break;
b7bcbe95 3988 default: /* undefined */
b7bcbe95
FB
3989 return 1;
3990 }
3991 break;
3992 default: /* undefined */
b7bcbe95
FB
3993 return 1;
3994 }
3995
3996 /* Write back the result. */
239c20c7
WN
3997 if (op == 15 && (rn >= 8 && rn <= 11)) {
3998 /* Comparison, do nothing. */
3999 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
4000 (rn & 0x1e) == 0x6)) {
4001 /* VCVT double to int: always integer result.
4002 * VCVT double to half precision is always a single
4003 * precision result.
4004 */
b7bcbe95 4005 gen_mov_vreg_F0(0, rd);
239c20c7 4006 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
4007 /* conversion */
4008 gen_mov_vreg_F0(!dp, rd);
239c20c7 4009 } else {
b7bcbe95 4010 gen_mov_vreg_F0(dp, rd);
239c20c7 4011 }
b7bcbe95
FB
4012
4013 /* break out of the loop if we have finished */
4014 if (veclen == 0)
4015 break;
4016
4017 if (op == 15 && delta_m == 0) {
4018 /* single source one-many */
4019 while (veclen--) {
4020 rd = ((rd + delta_d) & (bank_mask - 1))
4021 | (rd & bank_mask);
4022 gen_mov_vreg_F0(dp, rd);
4023 }
4024 break;
4025 }
4026 /* Setup the next operands. */
4027 veclen--;
4028 rd = ((rd + delta_d) & (bank_mask - 1))
4029 | (rd & bank_mask);
4030
4031 if (op == 15) {
4032 /* One source operand. */
4033 rm = ((rm + delta_m) & (bank_mask - 1))
4034 | (rm & bank_mask);
4035 gen_mov_F0_vreg(dp, rm);
4036 } else {
4037 /* Two source operands. */
4038 rn = ((rn + delta_d) & (bank_mask - 1))
4039 | (rn & bank_mask);
4040 gen_mov_F0_vreg(dp, rn);
4041 if (delta_m) {
4042 rm = ((rm + delta_m) & (bank_mask - 1))
4043 | (rm & bank_mask);
4044 gen_mov_F1_vreg(dp, rm);
4045 }
4046 }
4047 }
4048 }
4049 break;
4050 case 0xc:
4051 case 0xd:
8387da81 4052 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
4053 /* two-register transfer */
4054 rn = (insn >> 16) & 0xf;
4055 rd = (insn >> 12) & 0xf;
4056 if (dp) {
9ee6e8bb
PB
4057 VFP_DREG_M(rm, insn);
4058 } else {
4059 rm = VFP_SREG_M(insn);
4060 }
b7bcbe95 4061
18c9b560 4062 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
4063 /* vfp->arm */
4064 if (dp) {
4373f3ce
PB
4065 gen_mov_F0_vreg(0, rm * 2);
4066 tmp = gen_vfp_mrs();
4067 store_reg(s, rd, tmp);
4068 gen_mov_F0_vreg(0, rm * 2 + 1);
4069 tmp = gen_vfp_mrs();
4070 store_reg(s, rn, tmp);
b7bcbe95
FB
4071 } else {
4072 gen_mov_F0_vreg(0, rm);
4373f3ce 4073 tmp = gen_vfp_mrs();
8387da81 4074 store_reg(s, rd, tmp);
b7bcbe95 4075 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 4076 tmp = gen_vfp_mrs();
8387da81 4077 store_reg(s, rn, tmp);
b7bcbe95
FB
4078 }
4079 } else {
4080 /* arm->vfp */
4081 if (dp) {
4373f3ce
PB
4082 tmp = load_reg(s, rd);
4083 gen_vfp_msr(tmp);
4084 gen_mov_vreg_F0(0, rm * 2);
4085 tmp = load_reg(s, rn);
4086 gen_vfp_msr(tmp);
4087 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 4088 } else {
8387da81 4089 tmp = load_reg(s, rd);
4373f3ce 4090 gen_vfp_msr(tmp);
b7bcbe95 4091 gen_mov_vreg_F0(0, rm);
8387da81 4092 tmp = load_reg(s, rn);
4373f3ce 4093 gen_vfp_msr(tmp);
b7bcbe95
FB
4094 gen_mov_vreg_F0(0, rm + 1);
4095 }
4096 }
4097 } else {
4098 /* Load/store */
4099 rn = (insn >> 16) & 0xf;
4100 if (dp)
9ee6e8bb 4101 VFP_DREG_D(rd, insn);
b7bcbe95 4102 else
9ee6e8bb 4103 rd = VFP_SREG_D(insn);
b7bcbe95
FB
4104 if ((insn & 0x01200000) == 0x01000000) {
4105 /* Single load/store */
4106 offset = (insn & 0xff) << 2;
4107 if ((insn & (1 << 23)) == 0)
4108 offset = -offset;
934814f1
PM
4109 if (s->thumb && rn == 15) {
4110 /* This is actually UNPREDICTABLE */
4111 addr = tcg_temp_new_i32();
4112 tcg_gen_movi_i32(addr, s->pc & ~2);
4113 } else {
4114 addr = load_reg(s, rn);
4115 }
312eea9f 4116 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4117 if (insn & (1 << 20)) {
312eea9f 4118 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4119 gen_mov_vreg_F0(dp, rd);
4120 } else {
4121 gen_mov_F0_vreg(dp, rd);
312eea9f 4122 gen_vfp_st(s, dp, addr);
b7bcbe95 4123 }
7d1b0095 4124 tcg_temp_free_i32(addr);
b7bcbe95
FB
4125 } else {
4126 /* load/store multiple */
934814f1 4127 int w = insn & (1 << 21);
b7bcbe95
FB
4128 if (dp)
4129 n = (insn >> 1) & 0x7f;
4130 else
4131 n = insn & 0xff;
4132
934814f1
PM
4133 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4134 /* P == U , W == 1 => UNDEF */
4135 return 1;
4136 }
4137 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4138 /* UNPREDICTABLE cases for bad immediates: we choose to
4139 * UNDEF to avoid generating huge numbers of TCG ops
4140 */
4141 return 1;
4142 }
4143 if (rn == 15 && w) {
4144 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4145 return 1;
4146 }
4147
4148 if (s->thumb && rn == 15) {
4149 /* This is actually UNPREDICTABLE */
4150 addr = tcg_temp_new_i32();
4151 tcg_gen_movi_i32(addr, s->pc & ~2);
4152 } else {
4153 addr = load_reg(s, rn);
4154 }
b7bcbe95 4155 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4156 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4157
4158 if (dp)
4159 offset = 8;
4160 else
4161 offset = 4;
4162 for (i = 0; i < n; i++) {
18c9b560 4163 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4164 /* load */
312eea9f 4165 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4166 gen_mov_vreg_F0(dp, rd + i);
4167 } else {
4168 /* store */
4169 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4170 gen_vfp_st(s, dp, addr);
b7bcbe95 4171 }
312eea9f 4172 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4173 }
934814f1 4174 if (w) {
b7bcbe95
FB
4175 /* writeback */
4176 if (insn & (1 << 24))
4177 offset = -offset * n;
4178 else if (dp && (insn & 1))
4179 offset = 4;
4180 else
4181 offset = 0;
4182
4183 if (offset != 0)
312eea9f
FN
4184 tcg_gen_addi_i32(addr, addr, offset);
4185 store_reg(s, rn, addr);
4186 } else {
7d1b0095 4187 tcg_temp_free_i32(addr);
b7bcbe95
FB
4188 }
4189 }
4190 }
4191 break;
4192 default:
4193 /* Should never happen. */
4194 return 1;
4195 }
4196 return 0;
4197}
4198
90aa39a1 4199static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4200{
90aa39a1 4201#ifndef CONFIG_USER_ONLY
dcba3a8d 4202 return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
90aa39a1
SF
4203 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4204#else
4205 return true;
4206#endif
4207}
6e256c93 4208
8a6b28c7
EC
/* Emit an indirect TB exit: look the destination PC up in the TB
 * hash table and jump straight to it if found, otherwise return to
 * the main execution loop.
 */
static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}
4213
4cae8f56
AB
/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        /* Direct chaining: the goto_tb must be emitted before the PC
         * update so the jump can later be patched to skip it.
         */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb((uintptr_t)s->base.tb + n);
    } else {
        /* Cross-page (or otherwise unchainable) jump: set the PC and
         * go through the indirect lookup path.
         */
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}
4230
8aaca4c0
FB
/* Emit a jump to 'dest', using a direct/chained TB exit in the
 * normal case.  When single-stepping we must use an indirect branch
 * so the debug exception machinery still fires.
 */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;  /* keep the Thumb bit set in the branch target */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
    }
}
4242
/* Emit a 16x16->32 signed multiply as used by SMULxy and friends:
 * x/y select the top (1) or bottom (0) halfword of t0/t1 respectively.
 * The product is left in t0; both temps are clobbered.
 */
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);   /* select + sign-extend top half */
    else
        gen_sxth(t0);                   /* sign-extend bottom half */
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
4255
4256/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4257static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4258{
b5ff1b31
FB
4259 uint32_t mask;
4260
4261 mask = 0;
4262 if (flags & (1 << 0))
4263 mask |= 0xff;
4264 if (flags & (1 << 1))
4265 mask |= 0xff00;
4266 if (flags & (1 << 2))
4267 mask |= 0xff0000;
4268 if (flags & (1 << 3))
4269 mask |= 0xff000000;
9ee6e8bb 4270
2ae23e75 4271 /* Mask out undefined bits. */
9ee6e8bb 4272 mask &= ~CPSR_RESERVED;
d614a513 4273 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4274 mask &= ~CPSR_T;
d614a513
PM
4275 }
4276 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4277 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4278 }
4279 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4280 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4281 }
4282 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4283 mask &= ~CPSR_IT;
d614a513 4284 }
4051e12c
PM
4285 /* Mask out execution state and reserved bits. */
4286 if (!spsr) {
4287 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4288 }
b5ff1b31
FB
4289 /* Mask out privileged bits. */
4290 if (IS_USER(s))
9ee6e8bb 4291 mask &= CPSR_USER;
b5ff1b31
FB
4292 return mask;
4293}
4294
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep the SPSR bits outside 'mask',
         * replace the bits inside it with those from t0.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR/SPSR writes can change the translation regime, so end the TB. */
    gen_lookup_tb(s);
    return 0;
}
4316
2fbac54b
FN
/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    /* Immediate variant: materialize 'val' and defer to gen_set_psr(),
     * which consumes (frees) the temporary.
     */
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}
4325
8bfd0550
PM
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4470
/* Emit code for MSR (banked): write general register rn to the
 * banked register identified by (r, sysm).  Invalid encodings UNDEF
 * via msr_banked_access_decode().
 */
static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
4492
/* Emit code for MRS (banked): read the banked register identified by
 * (r, sysm) into general register rn.  Invalid encodings UNDEF via
 * msr_banked_access_decode().
 */
static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE;
}
4514
fb0e8e79
PM
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    /* Marks 'pc' as dead (freed here). */
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4524
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}
3b46e624 4538
fb0e8e79
PM
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* Restore CPSR from the current SPSR alongside the PC. */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4544
c22edfeb
AB
/*
 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
 * only call the helper when running single threaded TCG code to ensure
 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
 * just skip this instruction. Currently the SEV/SEVL instructions
 * which are *one* of many ways to wake the CPU from WFE are not
 * implemented so we can't sleep like WFI does.
 */
static void gen_nop_hint(DisasContext *s, int val)
{
    switch (val) {
        /* When running in MTTCG we don't generate jumps to the yield and
         * WFE helpers as it won't affect the scheduling of other vCPUs.
         * If we wanted to more completely model WFE/SEV so we don't busy
         * spin unnecessarily we would need to do something more involved.
         */
    case 1: /* yield */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_YIELD;
        }
        break;
    case 3: /* wfi */
        gen_set_pc_im(s, s->pc);
        s->base.is_jmp = DISAS_WFI;
        break;
    case 2: /* wfe */
        if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFE;
        }
        break;
    case 4: /* sev */
    case 5: /* sevl */
        /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
    default: /* nop */
        break;
    }
}
99c475ab 4584
/* Shorthand argument list: destination and both sources for the
 * common (V0, V0, V1) 64-bit Neon operand pattern.
 */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
39d5492a 4587static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4588{
4589 switch (size) {
dd8fbd78
FN
4590 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4591 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4592 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4593 default: abort();
9ee6e8bb 4594 }
9ee6e8bb
PB
4595}
4596
39d5492a 4597static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4598{
4599 switch (size) {
dd8fbd78
FN
4600 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4601 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4602 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4603 default: return;
4604 }
4605}
4606
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
4612
ad69471c
PB
/* Dispatch to the env-taking Neon integer helper selected by the
 * in-scope 'size' (element width) and 'u' (unsignedness) variables,
 * operating on tmp/tmp2.  The default case makes the enclosing
 * function return 1 (treated as UNDEF by the caller).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4635
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take
 * cpu_env.  Selection is by the in-scope 'size' and 'u'; the default
 * case makes the enclosing function return 1.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4658
/* Load 32 bits from the indexed Neon scratch slot in CPUARMState into
 * a fresh temporary, which the caller owns (and must free).
 */
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
4665
/* Store 'var' into the indexed Neon scratch slot in CPUARMState.
 * Consumes (frees) 'var'.
 */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4671
39d5492a 4672static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4673{
39d5492a 4674 TCGv_i32 tmp;
9ee6e8bb 4675 if (size == 1) {
0fad6efc
PM
4676 tmp = neon_load_reg(reg & 7, reg >> 4);
4677 if (reg & 8) {
dd8fbd78 4678 gen_neon_dup_high16(tmp);
0fad6efc
PM
4679 } else {
4680 gen_neon_dup_low16(tmp);
dd8fbd78 4681 }
0fad6efc
PM
4682 } else {
4683 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4684 }
dd8fbd78 4685 return tmp;
9ee6e8bb
PB
4686}
4687
/* Emit a Neon VUZP (unzip/de-interleave) of registers rd and rm.
 * 'size' selects the element width (0/1/2 = 8/16/32 bit), 'q' selects
 * quad (128-bit) operands.  Returns nonzero for the invalid
 * (!q && size == 2) encoding, which the caller treats as UNDEF.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    /* Helpers operate in-place on the register file via pointers. */
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qunzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qunzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_unzip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
4727
/* Emit a Neon VZIP (zip/interleave) of registers rd and rm.
 * 'size' selects the element width (0/1/2 = 8/16/32 bit), 'q' selects
 * quad (128-bit) operands.  Returns nonzero for the invalid
 * (!q && size == 2) encoding, which the caller treats as UNDEF.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_ptr pd, pm;

    if (!q && size == 2) {
        return 1;
    }
    /* Helpers operate in-place on the register file via pointers. */
    pd = vfp_reg_ptr(true, rd);
    pm = vfp_reg_ptr(true, rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_qzip16(pd, pm);
            break;
        case 2:
            gen_helper_neon_qzip32(pd, pm);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(pd, pm);
            break;
        case 1:
            gen_helper_neon_zip16(pd, pm);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(pm);
    return 0;
}
4767
/* Emit the byte-element VTRN transpose on a 32-bit slice: exchange
 * the odd-position bytes of t0 with the even-position bytes of t1,
 * updating both operands in place.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* New t0 (built in rd): t0's even bytes shifted up, merged with
     * t1's even bytes.
     */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* New t1: t1's odd bytes shifted down, merged with t0's odd bytes.
     * Must be computed before t0 is overwritten below.
     */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4789
/* Emit the halfword-element VTRN transpose on a 32-bit slice:
 * exchange the high halfword of t0 with the low halfword of t1,
 * updating both operands in place.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* New t0 (built in rd): low half of t0 moved up, low half of t1 below. */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* New t1: high half of t1 moved down, high half of t0 above.
     * Must be computed before t0 is overwritten below.
     */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4808
4809
9ee6e8bb
PB
/* Per-op decode table for the NEON "load/store multiple structures"
 * instructions, indexed by the op field: number of registers
 * transferred, element interleave factor, and D-register spacing.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4827
4828/* Translate a NEON load/store element instruction. Return nonzero if the
4829 instruction is invalid. */
7dcc1f89 4830static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4831{
4832 int rd, rn, rm;
4833 int op;
4834 int nregs;
4835 int interleave;
84496233 4836 int spacing;
9ee6e8bb
PB
4837 int stride;
4838 int size;
4839 int reg;
4840 int pass;
4841 int load;
4842 int shift;
9ee6e8bb 4843 int n;
39d5492a
PM
4844 TCGv_i32 addr;
4845 TCGv_i32 tmp;
4846 TCGv_i32 tmp2;
84496233 4847 TCGv_i64 tmp64;
9ee6e8bb 4848
2c7ffc41
PM
4849 /* FIXME: this access check should not take precedence over UNDEF
4850 * for invalid encodings; we will generate incorrect syndrome information
4851 * for attempts to execute invalid vfp/neon encodings with FP disabled.
4852 */
9dbbc748 4853 if (s->fp_excp_el) {
2c7ffc41 4854 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 4855 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
4856 return 0;
4857 }
4858
5df8bac1 4859 if (!s->vfp_enabled)
9ee6e8bb
PB
4860 return 1;
4861 VFP_DREG_D(rd, insn);
4862 rn = (insn >> 16) & 0xf;
4863 rm = insn & 0xf;
4864 load = (insn & (1 << 21)) != 0;
4865 if ((insn & (1 << 23)) == 0) {
4866 /* Load store all elements. */
4867 op = (insn >> 8) & 0xf;
4868 size = (insn >> 6) & 3;
84496233 4869 if (op > 10)
9ee6e8bb 4870 return 1;
f2dd89d0
PM
4871 /* Catch UNDEF cases for bad values of align field */
4872 switch (op & 0xc) {
4873 case 4:
4874 if (((insn >> 5) & 1) == 1) {
4875 return 1;
4876 }
4877 break;
4878 case 8:
4879 if (((insn >> 4) & 3) == 3) {
4880 return 1;
4881 }
4882 break;
4883 default:
4884 break;
4885 }
9ee6e8bb
PB
4886 nregs = neon_ls_element_type[op].nregs;
4887 interleave = neon_ls_element_type[op].interleave;
84496233
JR
4888 spacing = neon_ls_element_type[op].spacing;
4889 if (size == 3 && (interleave | spacing) != 1)
4890 return 1;
e318a60b 4891 addr = tcg_temp_new_i32();
dcc65026 4892 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4893 stride = (1 << size) * interleave;
4894 for (reg = 0; reg < nregs; reg++) {
4895 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
4896 load_reg_var(s, addr, rn);
4897 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 4898 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
4899 load_reg_var(s, addr, rn);
4900 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 4901 }
84496233 4902 if (size == 3) {
8ed1237d 4903 tmp64 = tcg_temp_new_i64();
84496233 4904 if (load) {
12dcc321 4905 gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
84496233 4906 neon_store_reg64(tmp64, rd);
84496233 4907 } else {
84496233 4908 neon_load_reg64(tmp64, rd);
12dcc321 4909 gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
84496233 4910 }
8ed1237d 4911 tcg_temp_free_i64(tmp64);
84496233
JR
4912 tcg_gen_addi_i32(addr, addr, stride);
4913 } else {
4914 for (pass = 0; pass < 2; pass++) {
4915 if (size == 2) {
4916 if (load) {
58ab8e96 4917 tmp = tcg_temp_new_i32();
12dcc321 4918 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
84496233
JR
4919 neon_store_reg(rd, pass, tmp);
4920 } else {
4921 tmp = neon_load_reg(rd, pass);
12dcc321 4922 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
58ab8e96 4923 tcg_temp_free_i32(tmp);
84496233 4924 }
1b2b1e54 4925 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
4926 } else if (size == 1) {
4927 if (load) {
58ab8e96 4928 tmp = tcg_temp_new_i32();
12dcc321 4929 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
84496233 4930 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 4931 tmp2 = tcg_temp_new_i32();
12dcc321 4932 gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
84496233 4933 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
4934 tcg_gen_shli_i32(tmp2, tmp2, 16);
4935 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 4936 tcg_temp_free_i32(tmp2);
84496233
JR
4937 neon_store_reg(rd, pass, tmp);
4938 } else {
4939 tmp = neon_load_reg(rd, pass);
7d1b0095 4940 tmp2 = tcg_temp_new_i32();
84496233 4941 tcg_gen_shri_i32(tmp2, tmp, 16);
12dcc321 4942 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
58ab8e96 4943 tcg_temp_free_i32(tmp);
84496233 4944 tcg_gen_addi_i32(addr, addr, stride);
12dcc321 4945 gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
58ab8e96 4946 tcg_temp_free_i32(tmp2);
1b2b1e54 4947 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 4948 }
84496233
JR
4949 } else /* size == 0 */ {
4950 if (load) {
f764718d 4951 tmp2 = NULL;
84496233 4952 for (n = 0; n < 4; n++) {
58ab8e96 4953 tmp = tcg_temp_new_i32();
12dcc321 4954 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
84496233
JR
4955 tcg_gen_addi_i32(addr, addr, stride);
4956 if (n == 0) {
4957 tmp2 = tmp;
4958 } else {
41ba8341
PB
4959 tcg_gen_shli_i32(tmp, tmp, n * 8);
4960 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 4961 tcg_temp_free_i32(tmp);
84496233 4962 }
9ee6e8bb 4963 }
84496233
JR
4964 neon_store_reg(rd, pass, tmp2);
4965 } else {
4966 tmp2 = neon_load_reg(rd, pass);
4967 for (n = 0; n < 4; n++) {
7d1b0095 4968 tmp = tcg_temp_new_i32();
84496233
JR
4969 if (n == 0) {
4970 tcg_gen_mov_i32(tmp, tmp2);
4971 } else {
4972 tcg_gen_shri_i32(tmp, tmp2, n * 8);
4973 }
12dcc321 4974 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
58ab8e96 4975 tcg_temp_free_i32(tmp);
84496233
JR
4976 tcg_gen_addi_i32(addr, addr, stride);
4977 }
7d1b0095 4978 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
4979 }
4980 }
4981 }
4982 }
84496233 4983 rd += spacing;
9ee6e8bb 4984 }
e318a60b 4985 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4986 stride = nregs * 8;
4987 } else {
4988 size = (insn >> 10) & 3;
4989 if (size == 3) {
4990 /* Load single element to all lanes. */
8e18cde3
PM
4991 int a = (insn >> 4) & 1;
4992 if (!load) {
9ee6e8bb 4993 return 1;
8e18cde3 4994 }
9ee6e8bb
PB
4995 size = (insn >> 6) & 3;
4996 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
4997
4998 if (size == 3) {
4999 if (nregs != 4 || a == 0) {
9ee6e8bb 5000 return 1;
99c475ab 5001 }
8e18cde3
PM
5002 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
5003 size = 2;
5004 }
5005 if (nregs == 1 && a == 1 && size == 0) {
5006 return 1;
5007 }
5008 if (nregs == 3 && a == 1) {
5009 return 1;
5010 }
e318a60b 5011 addr = tcg_temp_new_i32();
8e18cde3
PM
5012 load_reg_var(s, addr, rn);
5013 if (nregs == 1) {
5014 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
5015 tmp = gen_load_and_replicate(s, addr, size);
5016 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5017 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5018 if (insn & (1 << 5)) {
5019 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
5020 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
5021 }
5022 tcg_temp_free_i32(tmp);
5023 } else {
5024 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
5025 stride = (insn & (1 << 5)) ? 2 : 1;
5026 for (reg = 0; reg < nregs; reg++) {
5027 tmp = gen_load_and_replicate(s, addr, size);
5028 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
5029 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
5030 tcg_temp_free_i32(tmp);
5031 tcg_gen_addi_i32(addr, addr, 1 << size);
5032 rd += stride;
5033 }
9ee6e8bb 5034 }
e318a60b 5035 tcg_temp_free_i32(addr);
9ee6e8bb
PB
5036 stride = (1 << size) * nregs;
5037 } else {
5038 /* Single element. */
93262b16 5039 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
5040 pass = (insn >> 7) & 1;
5041 switch (size) {
5042 case 0:
5043 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
5044 stride = 1;
5045 break;
5046 case 1:
5047 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
5048 stride = (insn & (1 << 5)) ? 2 : 1;
5049 break;
5050 case 2:
5051 shift = 0;
9ee6e8bb
PB
5052 stride = (insn & (1 << 6)) ? 2 : 1;
5053 break;
5054 default:
5055 abort();
5056 }
5057 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
5058 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
5059 switch (nregs) {
5060 case 1:
5061 if (((idx & (1 << size)) != 0) ||
5062 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
5063 return 1;
5064 }
5065 break;
5066 case 3:
5067 if ((idx & 1) != 0) {
5068 return 1;
5069 }
5070 /* fall through */
5071 case 2:
5072 if (size == 2 && (idx & 2) != 0) {
5073 return 1;
5074 }
5075 break;
5076 case 4:
5077 if ((size == 2) && ((idx & 3) == 3)) {
5078 return 1;
5079 }
5080 break;
5081 default:
5082 abort();
5083 }
5084 if ((rd + stride * (nregs - 1)) > 31) {
5085 /* Attempts to write off the end of the register file
5086 * are UNPREDICTABLE; we choose to UNDEF because otherwise
5087 * the neon_load_reg() would write off the end of the array.
5088 */
5089 return 1;
5090 }
e318a60b 5091 addr = tcg_temp_new_i32();
dcc65026 5092 load_reg_var(s, addr, rn);
9ee6e8bb
PB
5093 for (reg = 0; reg < nregs; reg++) {
5094 if (load) {
58ab8e96 5095 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
5096 switch (size) {
5097 case 0:
12dcc321 5098 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5099 break;
5100 case 1:
12dcc321 5101 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5102 break;
5103 case 2:
12dcc321 5104 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5105 break;
a50f5b91
PB
5106 default: /* Avoid compiler warnings. */
5107 abort();
9ee6e8bb
PB
5108 }
5109 if (size != 2) {
8f8e3aa4 5110 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
5111 tcg_gen_deposit_i32(tmp, tmp2, tmp,
5112 shift, size ? 16 : 8);
7d1b0095 5113 tcg_temp_free_i32(tmp2);
9ee6e8bb 5114 }
8f8e3aa4 5115 neon_store_reg(rd, pass, tmp);
9ee6e8bb 5116 } else { /* Store */
8f8e3aa4
PB
5117 tmp = neon_load_reg(rd, pass);
5118 if (shift)
5119 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
5120 switch (size) {
5121 case 0:
12dcc321 5122 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5123 break;
5124 case 1:
12dcc321 5125 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
9ee6e8bb
PB
5126 break;
5127 case 2:
12dcc321 5128 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
9ee6e8bb 5129 break;
99c475ab 5130 }
58ab8e96 5131 tcg_temp_free_i32(tmp);
99c475ab 5132 }
9ee6e8bb 5133 rd += stride;
1b2b1e54 5134 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 5135 }
e318a60b 5136 tcg_temp_free_i32(addr);
9ee6e8bb 5137 stride = nregs * (1 << size);
99c475ab 5138 }
9ee6e8bb
PB
5139 }
5140 if (rm != 15) {
39d5492a 5141 TCGv_i32 base;
b26eefb6
PB
5142
5143 base = load_reg(s, rn);
9ee6e8bb 5144 if (rm == 13) {
b26eefb6 5145 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 5146 } else {
39d5492a 5147 TCGv_i32 index;
b26eefb6
PB
5148 index = load_reg(s, rm);
5149 tcg_gen_add_i32(base, base, index);
7d1b0095 5150 tcg_temp_free_i32(index);
9ee6e8bb 5151 }
b26eefb6 5152 store_reg(s, rn, base);
9ee6e8bb
PB
5153 }
5154 return 0;
5155}
3b46e624 5156
8f8e3aa4 5157/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5158static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5159{
5160 tcg_gen_and_i32(t, t, c);
f669df27 5161 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5162 tcg_gen_or_i32(dest, t, f);
5163}
5164
39d5492a 5165static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5166{
5167 switch (size) {
5168 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5169 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5170 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5171 default: abort();
5172 }
5173}
5174
39d5492a 5175static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5176{
5177 switch (size) {
02da0b2d
PM
5178 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5179 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5180 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5181 default: abort();
5182 }
5183}
5184
39d5492a 5185static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5186{
5187 switch (size) {
02da0b2d
PM
5188 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5189 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5190 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5191 default: abort();
5192 }
5193}
5194
39d5492a 5195static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5196{
5197 switch (size) {
02da0b2d
PM
5198 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5199 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5200 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5201 default: abort();
5202 }
5203}
5204
39d5492a 5205static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5206 int q, int u)
5207{
5208 if (q) {
5209 if (u) {
5210 switch (size) {
5211 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5212 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5213 default: abort();
5214 }
5215 } else {
5216 switch (size) {
5217 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5218 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5219 default: abort();
5220 }
5221 }
5222 } else {
5223 if (u) {
5224 switch (size) {
b408a9b0
CL
5225 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5226 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5227 default: abort();
5228 }
5229 } else {
5230 switch (size) {
5231 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5232 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5233 default: abort();
5234 }
5235 }
5236 }
5237}
5238
39d5492a 5239static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5240{
5241 if (u) {
5242 switch (size) {
5243 case 0: gen_helper_neon_widen_u8(dest, src); break;
5244 case 1: gen_helper_neon_widen_u16(dest, src); break;
5245 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5246 default: abort();
5247 }
5248 } else {
5249 switch (size) {
5250 case 0: gen_helper_neon_widen_s8(dest, src); break;
5251 case 1: gen_helper_neon_widen_s16(dest, src); break;
5252 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5253 default: abort();
5254 }
5255 }
7d1b0095 5256 tcg_temp_free_i32(src);
ad69471c
PB
5257}
5258
5259static inline void gen_neon_addl(int size)
5260{
5261 switch (size) {
5262 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5263 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5264 case 2: tcg_gen_add_i64(CPU_V001); break;
5265 default: abort();
5266 }
5267}
5268
5269static inline void gen_neon_subl(int size)
5270{
5271 switch (size) {
5272 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5273 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5274 case 2: tcg_gen_sub_i64(CPU_V001); break;
5275 default: abort();
5276 }
5277}
5278
a7812ae4 5279static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5280{
5281 switch (size) {
5282 case 0: gen_helper_neon_negl_u16(var, var); break;
5283 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5284 case 2:
5285 tcg_gen_neg_i64(var, var);
5286 break;
ad69471c
PB
5287 default: abort();
5288 }
5289}
5290
a7812ae4 5291static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5292{
5293 switch (size) {
02da0b2d
PM
5294 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5295 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5296 default: abort();
5297 }
5298}
5299
39d5492a
PM
5300static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5301 int size, int u)
ad69471c 5302{
a7812ae4 5303 TCGv_i64 tmp;
ad69471c
PB
5304
5305 switch ((size << 1) | u) {
5306 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5307 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5308 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5309 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5310 case 4:
5311 tmp = gen_muls_i64_i32(a, b);
5312 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5313 tcg_temp_free_i64(tmp);
ad69471c
PB
5314 break;
5315 case 5:
5316 tmp = gen_mulu_i64_i32(a, b);
5317 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5318 tcg_temp_free_i64(tmp);
ad69471c
PB
5319 break;
5320 default: abort();
5321 }
c6067f04
CL
5322
5323 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5324 Don't forget to clean them now. */
5325 if (size < 2) {
7d1b0095
PM
5326 tcg_temp_free_i32(a);
5327 tcg_temp_free_i32(b);
c6067f04 5328 }
ad69471c
PB
5329}
5330
39d5492a
PM
5331static void gen_neon_narrow_op(int op, int u, int size,
5332 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5333{
5334 if (op) {
5335 if (u) {
5336 gen_neon_unarrow_sats(size, dest, src);
5337 } else {
5338 gen_neon_narrow(size, dest, src);
5339 }
5340 } else {
5341 if (u) {
5342 gen_neon_narrow_satu(size, dest, src);
5343 } else {
5344 gen_neon_narrow_sats(size, dest, src);
5345 }
5346 }
5347}
5348
62698be3
PM
5349/* Symbolic constants for op fields for Neon 3-register same-length.
5350 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
5351 * table A7-9.
5352 */
5353#define NEON_3R_VHADD 0
5354#define NEON_3R_VQADD 1
5355#define NEON_3R_VRHADD 2
5356#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
5357#define NEON_3R_VHSUB 4
5358#define NEON_3R_VQSUB 5
5359#define NEON_3R_VCGT 6
5360#define NEON_3R_VCGE 7
5361#define NEON_3R_VSHL 8
5362#define NEON_3R_VQSHL 9
5363#define NEON_3R_VRSHL 10
5364#define NEON_3R_VQRSHL 11
5365#define NEON_3R_VMAX 12
5366#define NEON_3R_VMIN 13
5367#define NEON_3R_VABD 14
5368#define NEON_3R_VABA 15
5369#define NEON_3R_VADD_VSUB 16
5370#define NEON_3R_VTST_VCEQ 17
5371#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
5372#define NEON_3R_VMUL 19
5373#define NEON_3R_VPMAX 20
5374#define NEON_3R_VPMIN 21
5375#define NEON_3R_VQDMULH_VQRDMULH 22
5376#define NEON_3R_VPADD 23
f1ecb913 5377#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
da97f52c 5378#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
5379#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
5380#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
5381#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
5382#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
5383#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
505935fc 5384#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
5385
5386static const uint8_t neon_3r_sizes[] = {
5387 [NEON_3R_VHADD] = 0x7,
5388 [NEON_3R_VQADD] = 0xf,
5389 [NEON_3R_VRHADD] = 0x7,
5390 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5391 [NEON_3R_VHSUB] = 0x7,
5392 [NEON_3R_VQSUB] = 0xf,
5393 [NEON_3R_VCGT] = 0x7,
5394 [NEON_3R_VCGE] = 0x7,
5395 [NEON_3R_VSHL] = 0xf,
5396 [NEON_3R_VQSHL] = 0xf,
5397 [NEON_3R_VRSHL] = 0xf,
5398 [NEON_3R_VQRSHL] = 0xf,
5399 [NEON_3R_VMAX] = 0x7,
5400 [NEON_3R_VMIN] = 0x7,
5401 [NEON_3R_VABD] = 0x7,
5402 [NEON_3R_VABA] = 0x7,
5403 [NEON_3R_VADD_VSUB] = 0xf,
5404 [NEON_3R_VTST_VCEQ] = 0x7,
5405 [NEON_3R_VML] = 0x7,
5406 [NEON_3R_VMUL] = 0x7,
5407 [NEON_3R_VPMAX] = 0x7,
5408 [NEON_3R_VPMIN] = 0x7,
5409 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5410 [NEON_3R_VPADD] = 0x7,
f1ecb913 5411 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
da97f52c 5412 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
5413 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5414 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5415 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5416 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5417 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
505935fc 5418 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
62698be3
PM
5419};
5420
600b828c
PM
5421/* Symbolic constants for op fields for Neon 2-register miscellaneous.
5422 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
5423 * table A7-13.
5424 */
5425#define NEON_2RM_VREV64 0
5426#define NEON_2RM_VREV32 1
5427#define NEON_2RM_VREV16 2
5428#define NEON_2RM_VPADDL 4
5429#define NEON_2RM_VPADDL_U 5
9d935509
AB
5430#define NEON_2RM_AESE 6 /* Includes AESD */
5431#define NEON_2RM_AESMC 7 /* Includes AESIMC */
600b828c
PM
5432#define NEON_2RM_VCLS 8
5433#define NEON_2RM_VCLZ 9
5434#define NEON_2RM_VCNT 10
5435#define NEON_2RM_VMVN 11
5436#define NEON_2RM_VPADAL 12
5437#define NEON_2RM_VPADAL_U 13
5438#define NEON_2RM_VQABS 14
5439#define NEON_2RM_VQNEG 15
5440#define NEON_2RM_VCGT0 16
5441#define NEON_2RM_VCGE0 17
5442#define NEON_2RM_VCEQ0 18
5443#define NEON_2RM_VCLE0 19
5444#define NEON_2RM_VCLT0 20
f1ecb913 5445#define NEON_2RM_SHA1H 21
600b828c
PM
5446#define NEON_2RM_VABS 22
5447#define NEON_2RM_VNEG 23
5448#define NEON_2RM_VCGT0_F 24
5449#define NEON_2RM_VCGE0_F 25
5450#define NEON_2RM_VCEQ0_F 26
5451#define NEON_2RM_VCLE0_F 27
5452#define NEON_2RM_VCLT0_F 28
5453#define NEON_2RM_VABS_F 30
5454#define NEON_2RM_VNEG_F 31
5455#define NEON_2RM_VSWP 32
5456#define NEON_2RM_VTRN 33
5457#define NEON_2RM_VUZP 34
5458#define NEON_2RM_VZIP 35
5459#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
5460#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
5461#define NEON_2RM_VSHLL 38
f1ecb913 5462#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
34f7b0a2 5463#define NEON_2RM_VRINTN 40
2ce70625 5464#define NEON_2RM_VRINTX 41
34f7b0a2
WN
5465#define NEON_2RM_VRINTA 42
5466#define NEON_2RM_VRINTZ 43
600b828c 5467#define NEON_2RM_VCVT_F16_F32 44
34f7b0a2 5468#define NEON_2RM_VRINTM 45
600b828c 5469#define NEON_2RM_VCVT_F32_F16 46
34f7b0a2 5470#define NEON_2RM_VRINTP 47
901ad525
WN
5471#define NEON_2RM_VCVTAU 48
5472#define NEON_2RM_VCVTAS 49
5473#define NEON_2RM_VCVTNU 50
5474#define NEON_2RM_VCVTNS 51
5475#define NEON_2RM_VCVTPU 52
5476#define NEON_2RM_VCVTPS 53
5477#define NEON_2RM_VCVTMU 54
5478#define NEON_2RM_VCVTMS 55
600b828c
PM
5479#define NEON_2RM_VRECPE 56
5480#define NEON_2RM_VRSQRTE 57
5481#define NEON_2RM_VRECPE_F 58
5482#define NEON_2RM_VRSQRTE_F 59
5483#define NEON_2RM_VCVT_FS 60
5484#define NEON_2RM_VCVT_FU 61
5485#define NEON_2RM_VCVT_SF 62
5486#define NEON_2RM_VCVT_UF 63
5487
5488static int neon_2rm_is_float_op(int op)
5489{
5490 /* Return true if this neon 2reg-misc op is float-to-float */
5491 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5492 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5493 op == NEON_2RM_VRINTM ||
5494 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5495 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5496}
5497
fe8fcf3d
PM
5498static bool neon_2rm_is_v8_op(int op)
5499{
5500 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5501 switch (op) {
5502 case NEON_2RM_VRINTN:
5503 case NEON_2RM_VRINTA:
5504 case NEON_2RM_VRINTM:
5505 case NEON_2RM_VRINTP:
5506 case NEON_2RM_VRINTZ:
5507 case NEON_2RM_VRINTX:
5508 case NEON_2RM_VCVTAU:
5509 case NEON_2RM_VCVTAS:
5510 case NEON_2RM_VCVTNU:
5511 case NEON_2RM_VCVTNS:
5512 case NEON_2RM_VCVTPU:
5513 case NEON_2RM_VCVTPS:
5514 case NEON_2RM_VCVTMU:
5515 case NEON_2RM_VCVTMS:
5516 return true;
5517 default:
5518 return false;
5519 }
5520}
5521
600b828c
PM
5522/* Each entry in this array has bit n set if the insn allows
5523 * size value n (otherwise it will UNDEF). Since unallocated
5524 * op values will have no bits set they always UNDEF.
5525 */
5526static const uint8_t neon_2rm_sizes[] = {
5527 [NEON_2RM_VREV64] = 0x7,
5528 [NEON_2RM_VREV32] = 0x3,
5529 [NEON_2RM_VREV16] = 0x1,
5530 [NEON_2RM_VPADDL] = 0x7,
5531 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
5532 [NEON_2RM_AESE] = 0x1,
5533 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
5534 [NEON_2RM_VCLS] = 0x7,
5535 [NEON_2RM_VCLZ] = 0x7,
5536 [NEON_2RM_VCNT] = 0x1,
5537 [NEON_2RM_VMVN] = 0x1,
5538 [NEON_2RM_VPADAL] = 0x7,
5539 [NEON_2RM_VPADAL_U] = 0x7,
5540 [NEON_2RM_VQABS] = 0x7,
5541 [NEON_2RM_VQNEG] = 0x7,
5542 [NEON_2RM_VCGT0] = 0x7,
5543 [NEON_2RM_VCGE0] = 0x7,
5544 [NEON_2RM_VCEQ0] = 0x7,
5545 [NEON_2RM_VCLE0] = 0x7,
5546 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 5547 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
5548 [NEON_2RM_VABS] = 0x7,
5549 [NEON_2RM_VNEG] = 0x7,
5550 [NEON_2RM_VCGT0_F] = 0x4,
5551 [NEON_2RM_VCGE0_F] = 0x4,
5552 [NEON_2RM_VCEQ0_F] = 0x4,
5553 [NEON_2RM_VCLE0_F] = 0x4,
5554 [NEON_2RM_VCLT0_F] = 0x4,
5555 [NEON_2RM_VABS_F] = 0x4,
5556 [NEON_2RM_VNEG_F] = 0x4,
5557 [NEON_2RM_VSWP] = 0x1,
5558 [NEON_2RM_VTRN] = 0x7,
5559 [NEON_2RM_VUZP] = 0x7,
5560 [NEON_2RM_VZIP] = 0x7,
5561 [NEON_2RM_VMOVN] = 0x7,
5562 [NEON_2RM_VQMOVN] = 0x7,
5563 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 5564 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 5565 [NEON_2RM_VRINTN] = 0x4,
2ce70625 5566 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
5567 [NEON_2RM_VRINTA] = 0x4,
5568 [NEON_2RM_VRINTZ] = 0x4,
600b828c 5569 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 5570 [NEON_2RM_VRINTM] = 0x4,
600b828c 5571 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 5572 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
5573 [NEON_2RM_VCVTAU] = 0x4,
5574 [NEON_2RM_VCVTAS] = 0x4,
5575 [NEON_2RM_VCVTNU] = 0x4,
5576 [NEON_2RM_VCVTNS] = 0x4,
5577 [NEON_2RM_VCVTPU] = 0x4,
5578 [NEON_2RM_VCVTPS] = 0x4,
5579 [NEON_2RM_VCVTMU] = 0x4,
5580 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
5581 [NEON_2RM_VRECPE] = 0x4,
5582 [NEON_2RM_VRSQRTE] = 0x4,
5583 [NEON_2RM_VRECPE_F] = 0x4,
5584 [NEON_2RM_VRSQRTE_F] = 0x4,
5585 [NEON_2RM_VCVT_FS] = 0x4,
5586 [NEON_2RM_VCVT_FU] = 0x4,
5587 [NEON_2RM_VCVT_SF] = 0x4,
5588 [NEON_2RM_VCVT_UF] = 0x4,
5589};
5590
9ee6e8bb
PB
5591/* Translate a NEON data processing instruction. Return nonzero if the
5592 instruction is invalid.
ad69471c
PB
5593 We process data in a mixture of 32-bit and 64-bit chunks.
5594 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5595
7dcc1f89 5596static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5597{
5598 int op;
5599 int q;
5600 int rd, rn, rm;
5601 int size;
5602 int shift;
5603 int pass;
5604 int count;
5605 int pairwise;
5606 int u;
ca9a32e4 5607 uint32_t imm, mask;
39d5492a 5608 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
1a66ac61 5609 TCGv_ptr ptr1, ptr2, ptr3;
a7812ae4 5610 TCGv_i64 tmp64;
9ee6e8bb 5611
2c7ffc41
PM
5612 /* FIXME: this access check should not take precedence over UNDEF
5613 * for invalid encodings; we will generate incorrect syndrome information
5614 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5615 */
9dbbc748 5616 if (s->fp_excp_el) {
2c7ffc41 5617 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5618 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5619 return 0;
5620 }
5621
5df8bac1 5622 if (!s->vfp_enabled)
9ee6e8bb
PB
5623 return 1;
5624 q = (insn & (1 << 6)) != 0;
5625 u = (insn >> 24) & 1;
5626 VFP_DREG_D(rd, insn);
5627 VFP_DREG_N(rn, insn);
5628 VFP_DREG_M(rm, insn);
5629 size = (insn >> 20) & 3;
5630 if ((insn & (1 << 23)) == 0) {
5631 /* Three register same length. */
5632 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5633 /* Catch invalid op and bad size combinations: UNDEF */
5634 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5635 return 1;
5636 }
25f84f79
PM
5637 /* All insns of this form UNDEF for either this condition or the
5638 * superset of cases "Q==1"; we catch the latter later.
5639 */
5640 if (q && ((rd | rn | rm) & 1)) {
5641 return 1;
5642 }
f1ecb913
AB
5643 /*
5644 * The SHA-1/SHA-256 3-register instructions require special treatment
5645 * here, as their size field is overloaded as an op type selector, and
5646 * they all consume their input in a single pass.
5647 */
5648 if (op == NEON_3R_SHA) {
5649 if (!q) {
5650 return 1;
5651 }
5652 if (!u) { /* SHA-1 */
d614a513 5653 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5654 return 1;
5655 }
1a66ac61
RH
5656 ptr1 = vfp_reg_ptr(true, rd);
5657 ptr2 = vfp_reg_ptr(true, rn);
5658 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913 5659 tmp4 = tcg_const_i32(size);
1a66ac61 5660 gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
f1ecb913
AB
5661 tcg_temp_free_i32(tmp4);
5662 } else { /* SHA-256 */
d614a513 5663 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5664 return 1;
5665 }
1a66ac61
RH
5666 ptr1 = vfp_reg_ptr(true, rd);
5667 ptr2 = vfp_reg_ptr(true, rn);
5668 ptr3 = vfp_reg_ptr(true, rm);
f1ecb913
AB
5669 switch (size) {
5670 case 0:
1a66ac61 5671 gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
f1ecb913
AB
5672 break;
5673 case 1:
1a66ac61 5674 gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
f1ecb913
AB
5675 break;
5676 case 2:
1a66ac61 5677 gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
f1ecb913
AB
5678 break;
5679 }
5680 }
1a66ac61
RH
5681 tcg_temp_free_ptr(ptr1);
5682 tcg_temp_free_ptr(ptr2);
5683 tcg_temp_free_ptr(ptr3);
f1ecb913
AB
5684 return 0;
5685 }
62698be3
PM
5686 if (size == 3 && op != NEON_3R_LOGIC) {
5687 /* 64-bit element instructions. */
9ee6e8bb 5688 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5689 neon_load_reg64(cpu_V0, rn + pass);
5690 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5691 switch (op) {
62698be3 5692 case NEON_3R_VQADD:
9ee6e8bb 5693 if (u) {
02da0b2d
PM
5694 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5695 cpu_V0, cpu_V1);
2c0262af 5696 } else {
02da0b2d
PM
5697 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5698 cpu_V0, cpu_V1);
2c0262af 5699 }
9ee6e8bb 5700 break;
62698be3 5701 case NEON_3R_VQSUB:
9ee6e8bb 5702 if (u) {
02da0b2d
PM
5703 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5704 cpu_V0, cpu_V1);
ad69471c 5705 } else {
02da0b2d
PM
5706 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5707 cpu_V0, cpu_V1);
ad69471c
PB
5708 }
5709 break;
62698be3 5710 case NEON_3R_VSHL:
ad69471c
PB
5711 if (u) {
5712 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5713 } else {
5714 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5715 }
5716 break;
62698be3 5717 case NEON_3R_VQSHL:
ad69471c 5718 if (u) {
02da0b2d
PM
5719 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5720 cpu_V1, cpu_V0);
ad69471c 5721 } else {
02da0b2d
PM
5722 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5723 cpu_V1, cpu_V0);
ad69471c
PB
5724 }
5725 break;
62698be3 5726 case NEON_3R_VRSHL:
ad69471c
PB
5727 if (u) {
5728 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5729 } else {
ad69471c
PB
5730 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5731 }
5732 break;
62698be3 5733 case NEON_3R_VQRSHL:
ad69471c 5734 if (u) {
02da0b2d
PM
5735 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5736 cpu_V1, cpu_V0);
ad69471c 5737 } else {
02da0b2d
PM
5738 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5739 cpu_V1, cpu_V0);
1e8d4eec 5740 }
9ee6e8bb 5741 break;
62698be3 5742 case NEON_3R_VADD_VSUB:
9ee6e8bb 5743 if (u) {
ad69471c 5744 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5745 } else {
ad69471c 5746 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5747 }
5748 break;
5749 default:
5750 abort();
2c0262af 5751 }
ad69471c 5752 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5753 }
9ee6e8bb 5754 return 0;
2c0262af 5755 }
25f84f79 5756 pairwise = 0;
9ee6e8bb 5757 switch (op) {
62698be3
PM
5758 case NEON_3R_VSHL:
5759 case NEON_3R_VQSHL:
5760 case NEON_3R_VRSHL:
5761 case NEON_3R_VQRSHL:
9ee6e8bb 5762 {
ad69471c
PB
5763 int rtmp;
5764 /* Shift instruction operands are reversed. */
5765 rtmp = rn;
9ee6e8bb 5766 rn = rm;
ad69471c 5767 rm = rtmp;
9ee6e8bb 5768 }
2c0262af 5769 break;
25f84f79
PM
5770 case NEON_3R_VPADD:
5771 if (u) {
5772 return 1;
5773 }
5774 /* Fall through */
62698be3
PM
5775 case NEON_3R_VPMAX:
5776 case NEON_3R_VPMIN:
9ee6e8bb 5777 pairwise = 1;
2c0262af 5778 break;
25f84f79
PM
5779 case NEON_3R_FLOAT_ARITH:
5780 pairwise = (u && size < 2); /* if VPADD (float) */
5781 break;
5782 case NEON_3R_FLOAT_MINMAX:
5783 pairwise = u; /* if VPMIN/VPMAX (float) */
5784 break;
5785 case NEON_3R_FLOAT_CMP:
5786 if (!u && size) {
5787 /* no encoding for U=0 C=1x */
5788 return 1;
5789 }
5790 break;
5791 case NEON_3R_FLOAT_ACMP:
5792 if (!u) {
5793 return 1;
5794 }
5795 break;
505935fc
WN
5796 case NEON_3R_FLOAT_MISC:
5797 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5798 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5799 return 1;
5800 }
2c0262af 5801 break;
25f84f79
PM
5802 case NEON_3R_VMUL:
5803 if (u && (size != 0)) {
5804 /* UNDEF on invalid size for polynomial subcase */
5805 return 1;
5806 }
2c0262af 5807 break;
da97f52c 5808 case NEON_3R_VFM:
d614a513 5809 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5810 return 1;
5811 }
5812 break;
9ee6e8bb 5813 default:
2c0262af 5814 break;
9ee6e8bb 5815 }
dd8fbd78 5816
25f84f79
PM
5817 if (pairwise && q) {
5818 /* All the pairwise insns UNDEF if Q is set */
5819 return 1;
5820 }
5821
9ee6e8bb
PB
5822 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5823
5824 if (pairwise) {
5825 /* Pairwise. */
a5a14945
JR
5826 if (pass < 1) {
5827 tmp = neon_load_reg(rn, 0);
5828 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5829 } else {
a5a14945
JR
5830 tmp = neon_load_reg(rm, 0);
5831 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5832 }
5833 } else {
5834 /* Elementwise. */
dd8fbd78
FN
5835 tmp = neon_load_reg(rn, pass);
5836 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5837 }
5838 switch (op) {
62698be3 5839 case NEON_3R_VHADD:
9ee6e8bb
PB
5840 GEN_NEON_INTEGER_OP(hadd);
5841 break;
62698be3 5842 case NEON_3R_VQADD:
02da0b2d 5843 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5844 break;
62698be3 5845 case NEON_3R_VRHADD:
9ee6e8bb 5846 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5847 break;
62698be3 5848 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5849 switch ((u << 2) | size) {
5850 case 0: /* VAND */
dd8fbd78 5851 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5852 break;
5853 case 1: /* BIC */
f669df27 5854 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5855 break;
5856 case 2: /* VORR */
dd8fbd78 5857 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5858 break;
5859 case 3: /* VORN */
f669df27 5860 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5861 break;
5862 case 4: /* VEOR */
dd8fbd78 5863 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5864 break;
5865 case 5: /* VBSL */
dd8fbd78
FN
5866 tmp3 = neon_load_reg(rd, pass);
5867 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5868 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5869 break;
5870 case 6: /* VBIT */
dd8fbd78
FN
5871 tmp3 = neon_load_reg(rd, pass);
5872 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5873 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5874 break;
5875 case 7: /* VBIF */
dd8fbd78
FN
5876 tmp3 = neon_load_reg(rd, pass);
5877 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5878 tcg_temp_free_i32(tmp3);
9ee6e8bb 5879 break;
2c0262af
FB
5880 }
5881 break;
62698be3 5882 case NEON_3R_VHSUB:
9ee6e8bb
PB
5883 GEN_NEON_INTEGER_OP(hsub);
5884 break;
62698be3 5885 case NEON_3R_VQSUB:
02da0b2d 5886 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5887 break;
62698be3 5888 case NEON_3R_VCGT:
9ee6e8bb
PB
5889 GEN_NEON_INTEGER_OP(cgt);
5890 break;
62698be3 5891 case NEON_3R_VCGE:
9ee6e8bb
PB
5892 GEN_NEON_INTEGER_OP(cge);
5893 break;
62698be3 5894 case NEON_3R_VSHL:
ad69471c 5895 GEN_NEON_INTEGER_OP(shl);
2c0262af 5896 break;
62698be3 5897 case NEON_3R_VQSHL:
02da0b2d 5898 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5899 break;
62698be3 5900 case NEON_3R_VRSHL:
ad69471c 5901 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5902 break;
62698be3 5903 case NEON_3R_VQRSHL:
02da0b2d 5904 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5905 break;
62698be3 5906 case NEON_3R_VMAX:
9ee6e8bb
PB
5907 GEN_NEON_INTEGER_OP(max);
5908 break;
62698be3 5909 case NEON_3R_VMIN:
9ee6e8bb
PB
5910 GEN_NEON_INTEGER_OP(min);
5911 break;
62698be3 5912 case NEON_3R_VABD:
9ee6e8bb
PB
5913 GEN_NEON_INTEGER_OP(abd);
5914 break;
62698be3 5915 case NEON_3R_VABA:
9ee6e8bb 5916 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5917 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5918 tmp2 = neon_load_reg(rd, pass);
5919 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5920 break;
62698be3 5921 case NEON_3R_VADD_VSUB:
9ee6e8bb 5922 if (!u) { /* VADD */
62698be3 5923 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5924 } else { /* VSUB */
5925 switch (size) {
dd8fbd78
FN
5926 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5927 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5928 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5929 default: abort();
9ee6e8bb
PB
5930 }
5931 }
5932 break;
62698be3 5933 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5934 if (!u) { /* VTST */
5935 switch (size) {
dd8fbd78
FN
5936 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5937 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5938 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5939 default: abort();
9ee6e8bb
PB
5940 }
5941 } else { /* VCEQ */
5942 switch (size) {
dd8fbd78
FN
5943 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5944 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5945 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5946 default: abort();
9ee6e8bb
PB
5947 }
5948 }
5949 break;
62698be3 5950 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5951 switch (size) {
dd8fbd78
FN
5952 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5953 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5954 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5955 default: abort();
9ee6e8bb 5956 }
7d1b0095 5957 tcg_temp_free_i32(tmp2);
dd8fbd78 5958 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5959 if (u) { /* VMLS */
dd8fbd78 5960 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5961 } else { /* VMLA */
dd8fbd78 5962 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5963 }
5964 break;
62698be3 5965 case NEON_3R_VMUL:
9ee6e8bb 5966 if (u) { /* polynomial */
dd8fbd78 5967 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5968 } else { /* Integer */
5969 switch (size) {
dd8fbd78
FN
5970 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5971 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5972 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5973 default: abort();
9ee6e8bb
PB
5974 }
5975 }
5976 break;
62698be3 5977 case NEON_3R_VPMAX:
9ee6e8bb
PB
5978 GEN_NEON_INTEGER_OP(pmax);
5979 break;
62698be3 5980 case NEON_3R_VPMIN:
9ee6e8bb
PB
5981 GEN_NEON_INTEGER_OP(pmin);
5982 break;
62698be3 5983 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5984 if (!u) { /* VQDMULH */
5985 switch (size) {
02da0b2d
PM
5986 case 1:
5987 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5988 break;
5989 case 2:
5990 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5991 break;
62698be3 5992 default: abort();
9ee6e8bb 5993 }
62698be3 5994 } else { /* VQRDMULH */
9ee6e8bb 5995 switch (size) {
02da0b2d
PM
5996 case 1:
5997 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5998 break;
5999 case 2:
6000 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
6001 break;
62698be3 6002 default: abort();
9ee6e8bb
PB
6003 }
6004 }
6005 break;
62698be3 6006 case NEON_3R_VPADD:
9ee6e8bb 6007 switch (size) {
dd8fbd78
FN
6008 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
6009 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
6010 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 6011 default: abort();
9ee6e8bb
PB
6012 }
6013 break;
62698be3 6014 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
6015 {
6016 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
6017 switch ((u << 2) | size) {
6018 case 0: /* VADD */
aa47cfdd
PM
6019 case 4: /* VPADD */
6020 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6021 break;
6022 case 2: /* VSUB */
aa47cfdd 6023 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6024 break;
6025 case 6: /* VABD */
aa47cfdd 6026 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
6027 break;
6028 default:
62698be3 6029 abort();
9ee6e8bb 6030 }
aa47cfdd 6031 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6032 break;
aa47cfdd 6033 }
62698be3 6034 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
6035 {
6036 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6037 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6038 if (!u) {
7d1b0095 6039 tcg_temp_free_i32(tmp2);
dd8fbd78 6040 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6041 if (size == 0) {
aa47cfdd 6042 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 6043 } else {
aa47cfdd 6044 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
6045 }
6046 }
aa47cfdd 6047 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6048 break;
aa47cfdd 6049 }
62698be3 6050 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
6051 {
6052 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 6053 if (!u) {
aa47cfdd 6054 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 6055 } else {
aa47cfdd
PM
6056 if (size == 0) {
6057 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6058 } else {
6059 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6060 }
b5ff1b31 6061 }
aa47cfdd 6062 tcg_temp_free_ptr(fpstatus);
2c0262af 6063 break;
aa47cfdd 6064 }
62698be3 6065 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6066 {
6067 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6068 if (size == 0) {
6069 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6070 } else {
6071 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6072 }
6073 tcg_temp_free_ptr(fpstatus);
2c0262af 6074 break;
aa47cfdd 6075 }
62698be3 6076 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6077 {
6078 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6079 if (size == 0) {
f71a2ae5 6080 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6081 } else {
f71a2ae5 6082 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6083 }
6084 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6085 break;
aa47cfdd 6086 }
505935fc
WN
6087 case NEON_3R_FLOAT_MISC:
6088 if (u) {
6089 /* VMAXNM/VMINNM */
6090 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6091 if (size == 0) {
f71a2ae5 6092 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6093 } else {
f71a2ae5 6094 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6095 }
6096 tcg_temp_free_ptr(fpstatus);
6097 } else {
6098 if (size == 0) {
6099 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6100 } else {
6101 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6102 }
6103 }
2c0262af 6104 break;
da97f52c
PM
6105 case NEON_3R_VFM:
6106 {
6107 /* VFMA, VFMS: fused multiply-add */
6108 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6109 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6110 if (size) {
6111 /* VFMS */
6112 gen_helper_vfp_negs(tmp, tmp);
6113 }
6114 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6115 tcg_temp_free_i32(tmp3);
6116 tcg_temp_free_ptr(fpstatus);
6117 break;
6118 }
9ee6e8bb
PB
6119 default:
6120 abort();
2c0262af 6121 }
7d1b0095 6122 tcg_temp_free_i32(tmp2);
dd8fbd78 6123
9ee6e8bb
PB
6124 /* Save the result. For elementwise operations we can put it
6125 straight into the destination register. For pairwise operations
6126 we have to be careful to avoid clobbering the source operands. */
6127 if (pairwise && rd == rm) {
dd8fbd78 6128 neon_store_scratch(pass, tmp);
9ee6e8bb 6129 } else {
dd8fbd78 6130 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6131 }
6132
6133 } /* for pass */
6134 if (pairwise && rd == rm) {
6135 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6136 tmp = neon_load_scratch(pass);
6137 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6138 }
6139 }
ad69471c 6140 /* End of 3 register same size operations. */
9ee6e8bb
PB
6141 } else if (insn & (1 << 4)) {
6142 if ((insn & 0x00380080) != 0) {
6143 /* Two registers and shift. */
6144 op = (insn >> 8) & 0xf;
6145 if (insn & (1 << 7)) {
cc13115b
PM
6146 /* 64-bit shift. */
6147 if (op > 7) {
6148 return 1;
6149 }
9ee6e8bb
PB
6150 size = 3;
6151 } else {
6152 size = 2;
6153 while ((insn & (1 << (size + 19))) == 0)
6154 size--;
6155 }
6156 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6157 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6158 by immediate using the variable shift operations. */
6159 if (op < 8) {
6160 /* Shift by immediate:
6161 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6162 if (q && ((rd | rm) & 1)) {
6163 return 1;
6164 }
6165 if (!u && (op == 4 || op == 6)) {
6166 return 1;
6167 }
9ee6e8bb
PB
6168 /* Right shifts are encoded as N - shift, where N is the
6169 element size in bits. */
6170 if (op <= 4)
6171 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6172 if (size == 3) {
6173 count = q + 1;
6174 } else {
6175 count = q ? 4: 2;
6176 }
6177 switch (size) {
6178 case 0:
6179 imm = (uint8_t) shift;
6180 imm |= imm << 8;
6181 imm |= imm << 16;
6182 break;
6183 case 1:
6184 imm = (uint16_t) shift;
6185 imm |= imm << 16;
6186 break;
6187 case 2:
6188 case 3:
6189 imm = shift;
6190 break;
6191 default:
6192 abort();
6193 }
6194
6195 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6196 if (size == 3) {
6197 neon_load_reg64(cpu_V0, rm + pass);
6198 tcg_gen_movi_i64(cpu_V1, imm);
6199 switch (op) {
6200 case 0: /* VSHR */
6201 case 1: /* VSRA */
6202 if (u)
6203 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6204 else
ad69471c 6205 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6206 break;
ad69471c
PB
6207 case 2: /* VRSHR */
6208 case 3: /* VRSRA */
6209 if (u)
6210 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6211 else
ad69471c 6212 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6213 break;
ad69471c 6214 case 4: /* VSRI */
ad69471c
PB
6215 case 5: /* VSHL, VSLI */
6216 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6217 break;
0322b26e 6218 case 6: /* VQSHLU */
02da0b2d
PM
6219 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6220 cpu_V0, cpu_V1);
ad69471c 6221 break;
0322b26e
PM
6222 case 7: /* VQSHL */
6223 if (u) {
02da0b2d 6224 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6225 cpu_V0, cpu_V1);
6226 } else {
02da0b2d 6227 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6228 cpu_V0, cpu_V1);
6229 }
9ee6e8bb 6230 break;
9ee6e8bb 6231 }
ad69471c
PB
6232 if (op == 1 || op == 3) {
6233 /* Accumulate. */
5371cb81 6234 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6235 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6236 } else if (op == 4 || (op == 5 && u)) {
6237 /* Insert */
923e6509
CL
6238 neon_load_reg64(cpu_V1, rd + pass);
6239 uint64_t mask;
6240 if (shift < -63 || shift > 63) {
6241 mask = 0;
6242 } else {
6243 if (op == 4) {
6244 mask = 0xffffffffffffffffull >> -shift;
6245 } else {
6246 mask = 0xffffffffffffffffull << shift;
6247 }
6248 }
6249 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6250 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6251 }
6252 neon_store_reg64(cpu_V0, rd + pass);
6253 } else { /* size < 3 */
6254 /* Operands in T0 and T1. */
dd8fbd78 6255 tmp = neon_load_reg(rm, pass);
7d1b0095 6256 tmp2 = tcg_temp_new_i32();
dd8fbd78 6257 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6258 switch (op) {
6259 case 0: /* VSHR */
6260 case 1: /* VSRA */
6261 GEN_NEON_INTEGER_OP(shl);
6262 break;
6263 case 2: /* VRSHR */
6264 case 3: /* VRSRA */
6265 GEN_NEON_INTEGER_OP(rshl);
6266 break;
6267 case 4: /* VSRI */
ad69471c
PB
6268 case 5: /* VSHL, VSLI */
6269 switch (size) {
dd8fbd78
FN
6270 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6271 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6272 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6273 default: abort();
ad69471c
PB
6274 }
6275 break;
0322b26e 6276 case 6: /* VQSHLU */
ad69471c 6277 switch (size) {
0322b26e 6278 case 0:
02da0b2d
PM
6279 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6280 tmp, tmp2);
0322b26e
PM
6281 break;
6282 case 1:
02da0b2d
PM
6283 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6284 tmp, tmp2);
0322b26e
PM
6285 break;
6286 case 2:
02da0b2d
PM
6287 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6288 tmp, tmp2);
0322b26e
PM
6289 break;
6290 default:
cc13115b 6291 abort();
ad69471c
PB
6292 }
6293 break;
0322b26e 6294 case 7: /* VQSHL */
02da0b2d 6295 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6296 break;
ad69471c 6297 }
7d1b0095 6298 tcg_temp_free_i32(tmp2);
ad69471c
PB
6299
6300 if (op == 1 || op == 3) {
6301 /* Accumulate. */
dd8fbd78 6302 tmp2 = neon_load_reg(rd, pass);
5371cb81 6303 gen_neon_add(size, tmp, tmp2);
7d1b0095 6304 tcg_temp_free_i32(tmp2);
ad69471c
PB
6305 } else if (op == 4 || (op == 5 && u)) {
6306 /* Insert */
6307 switch (size) {
6308 case 0:
6309 if (op == 4)
ca9a32e4 6310 mask = 0xff >> -shift;
ad69471c 6311 else
ca9a32e4
JR
6312 mask = (uint8_t)(0xff << shift);
6313 mask |= mask << 8;
6314 mask |= mask << 16;
ad69471c
PB
6315 break;
6316 case 1:
6317 if (op == 4)
ca9a32e4 6318 mask = 0xffff >> -shift;
ad69471c 6319 else
ca9a32e4
JR
6320 mask = (uint16_t)(0xffff << shift);
6321 mask |= mask << 16;
ad69471c
PB
6322 break;
6323 case 2:
ca9a32e4
JR
6324 if (shift < -31 || shift > 31) {
6325 mask = 0;
6326 } else {
6327 if (op == 4)
6328 mask = 0xffffffffu >> -shift;
6329 else
6330 mask = 0xffffffffu << shift;
6331 }
ad69471c
PB
6332 break;
6333 default:
6334 abort();
6335 }
dd8fbd78 6336 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6337 tcg_gen_andi_i32(tmp, tmp, mask);
6338 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6339 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6340 tcg_temp_free_i32(tmp2);
ad69471c 6341 }
dd8fbd78 6342 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6343 }
6344 } /* for pass */
6345 } else if (op < 10) {
ad69471c 6346 /* Shift by immediate and narrow:
9ee6e8bb 6347 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6348 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6349 if (rm & 1) {
6350 return 1;
6351 }
9ee6e8bb
PB
6352 shift = shift - (1 << (size + 3));
6353 size++;
92cdfaeb 6354 if (size == 3) {
a7812ae4 6355 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6356 neon_load_reg64(cpu_V0, rm);
6357 neon_load_reg64(cpu_V1, rm + 1);
6358 for (pass = 0; pass < 2; pass++) {
6359 TCGv_i64 in;
6360 if (pass == 0) {
6361 in = cpu_V0;
6362 } else {
6363 in = cpu_V1;
6364 }
ad69471c 6365 if (q) {
0b36f4cd 6366 if (input_unsigned) {
92cdfaeb 6367 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6368 } else {
92cdfaeb 6369 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6370 }
ad69471c 6371 } else {
0b36f4cd 6372 if (input_unsigned) {
92cdfaeb 6373 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6374 } else {
92cdfaeb 6375 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6376 }
ad69471c 6377 }
7d1b0095 6378 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6379 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6380 neon_store_reg(rd, pass, tmp);
6381 } /* for pass */
6382 tcg_temp_free_i64(tmp64);
6383 } else {
6384 if (size == 1) {
6385 imm = (uint16_t)shift;
6386 imm |= imm << 16;
2c0262af 6387 } else {
92cdfaeb
PM
6388 /* size == 2 */
6389 imm = (uint32_t)shift;
6390 }
6391 tmp2 = tcg_const_i32(imm);
6392 tmp4 = neon_load_reg(rm + 1, 0);
6393 tmp5 = neon_load_reg(rm + 1, 1);
6394 for (pass = 0; pass < 2; pass++) {
6395 if (pass == 0) {
6396 tmp = neon_load_reg(rm, 0);
6397 } else {
6398 tmp = tmp4;
6399 }
0b36f4cd
CL
6400 gen_neon_shift_narrow(size, tmp, tmp2, q,
6401 input_unsigned);
92cdfaeb
PM
6402 if (pass == 0) {
6403 tmp3 = neon_load_reg(rm, 1);
6404 } else {
6405 tmp3 = tmp5;
6406 }
0b36f4cd
CL
6407 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6408 input_unsigned);
36aa55dc 6409 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6410 tcg_temp_free_i32(tmp);
6411 tcg_temp_free_i32(tmp3);
6412 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6413 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6414 neon_store_reg(rd, pass, tmp);
6415 } /* for pass */
c6067f04 6416 tcg_temp_free_i32(tmp2);
b75263d6 6417 }
9ee6e8bb 6418 } else if (op == 10) {
cc13115b
PM
6419 /* VSHLL, VMOVL */
6420 if (q || (rd & 1)) {
9ee6e8bb 6421 return 1;
cc13115b 6422 }
ad69471c
PB
6423 tmp = neon_load_reg(rm, 0);
6424 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6425 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6426 if (pass == 1)
6427 tmp = tmp2;
6428
6429 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6430
9ee6e8bb
PB
6431 if (shift != 0) {
6432 /* The shift is less than the width of the source
ad69471c
PB
6433 type, so we can just shift the whole register. */
6434 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6435 /* Widen the result of shift: we need to clear
6436 * the potential overflow bits resulting from
6437 * left bits of the narrow input appearing as
6438 * right bits of left the neighbour narrow
6439 * input. */
ad69471c
PB
6440 if (size < 2 || !u) {
6441 uint64_t imm64;
6442 if (size == 0) {
6443 imm = (0xffu >> (8 - shift));
6444 imm |= imm << 16;
acdf01ef 6445 } else if (size == 1) {
ad69471c 6446 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6447 } else {
6448 /* size == 2 */
6449 imm = 0xffffffff >> (32 - shift);
6450 }
6451 if (size < 2) {
6452 imm64 = imm | (((uint64_t)imm) << 32);
6453 } else {
6454 imm64 = imm;
9ee6e8bb 6455 }
acdf01ef 6456 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6457 }
6458 }
ad69471c 6459 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6460 }
f73534a5 6461 } else if (op >= 14) {
9ee6e8bb 6462 /* VCVT fixed-point. */
cc13115b
PM
6463 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6464 return 1;
6465 }
f73534a5
PM
6466 /* We have already masked out the must-be-1 top bit of imm6,
6467 * hence this 32-shift where the ARM ARM has 64-imm6.
6468 */
6469 shift = 32 - shift;
9ee6e8bb 6470 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6471 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6472 if (!(op & 1)) {
9ee6e8bb 6473 if (u)
5500b06c 6474 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6475 else
5500b06c 6476 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6477 } else {
6478 if (u)
5500b06c 6479 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6480 else
5500b06c 6481 gen_vfp_tosl(0, shift, 1);
2c0262af 6482 }
4373f3ce 6483 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6484 }
6485 } else {
9ee6e8bb
PB
6486 return 1;
6487 }
6488 } else { /* (insn & 0x00380080) == 0 */
6489 int invert;
7d80fee5
PM
6490 if (q && (rd & 1)) {
6491 return 1;
6492 }
9ee6e8bb
PB
6493
6494 op = (insn >> 8) & 0xf;
6495 /* One register and immediate. */
6496 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6497 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6498 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6499 * We choose to not special-case this and will behave as if a
6500 * valid constant encoding of 0 had been given.
6501 */
9ee6e8bb
PB
6502 switch (op) {
6503 case 0: case 1:
6504 /* no-op */
6505 break;
6506 case 2: case 3:
6507 imm <<= 8;
6508 break;
6509 case 4: case 5:
6510 imm <<= 16;
6511 break;
6512 case 6: case 7:
6513 imm <<= 24;
6514 break;
6515 case 8: case 9:
6516 imm |= imm << 16;
6517 break;
6518 case 10: case 11:
6519 imm = (imm << 8) | (imm << 24);
6520 break;
6521 case 12:
8e31209e 6522 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6523 break;
6524 case 13:
6525 imm = (imm << 16) | 0xffff;
6526 break;
6527 case 14:
6528 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6529 if (invert)
6530 imm = ~imm;
6531 break;
6532 case 15:
7d80fee5
PM
6533 if (invert) {
6534 return 1;
6535 }
9ee6e8bb
PB
6536 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6537 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6538 break;
6539 }
6540 if (invert)
6541 imm = ~imm;
6542
9ee6e8bb
PB
6543 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6544 if (op & 1 && op < 12) {
ad69471c 6545 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6546 if (invert) {
6547 /* The immediate value has already been inverted, so
6548 BIC becomes AND. */
ad69471c 6549 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6550 } else {
ad69471c 6551 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6552 }
9ee6e8bb 6553 } else {
ad69471c 6554 /* VMOV, VMVN. */
7d1b0095 6555 tmp = tcg_temp_new_i32();
9ee6e8bb 6556 if (op == 14 && invert) {
a5a14945 6557 int n;
ad69471c
PB
6558 uint32_t val;
6559 val = 0;
9ee6e8bb
PB
6560 for (n = 0; n < 4; n++) {
6561 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6562 val |= 0xff << (n * 8);
9ee6e8bb 6563 }
ad69471c
PB
6564 tcg_gen_movi_i32(tmp, val);
6565 } else {
6566 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6567 }
9ee6e8bb 6568 }
ad69471c 6569 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6570 }
6571 }
e4b3861d 6572 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6573 if (size != 3) {
6574 op = (insn >> 8) & 0xf;
6575 if ((insn & (1 << 6)) == 0) {
6576 /* Three registers of different lengths. */
6577 int src1_wide;
6578 int src2_wide;
6579 int prewiden;
526d0096
PM
6580 /* undefreq: bit 0 : UNDEF if size == 0
6581 * bit 1 : UNDEF if size == 1
6582 * bit 2 : UNDEF if size == 2
6583 * bit 3 : UNDEF if U == 1
6584 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6585 */
6586 int undefreq;
6587 /* prewiden, src1_wide, src2_wide, undefreq */
6588 static const int neon_3reg_wide[16][4] = {
6589 {1, 0, 0, 0}, /* VADDL */
6590 {1, 1, 0, 0}, /* VADDW */
6591 {1, 0, 0, 0}, /* VSUBL */
6592 {1, 1, 0, 0}, /* VSUBW */
6593 {0, 1, 1, 0}, /* VADDHN */
6594 {0, 0, 0, 0}, /* VABAL */
6595 {0, 1, 1, 0}, /* VSUBHN */
6596 {0, 0, 0, 0}, /* VABDL */
6597 {0, 0, 0, 0}, /* VMLAL */
526d0096 6598 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6599 {0, 0, 0, 0}, /* VMLSL */
526d0096 6600 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6601 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6602 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6603 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6604 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6605 };
6606
6607 prewiden = neon_3reg_wide[op][0];
6608 src1_wide = neon_3reg_wide[op][1];
6609 src2_wide = neon_3reg_wide[op][2];
695272dc 6610 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6611
526d0096
PM
6612 if ((undefreq & (1 << size)) ||
6613 ((undefreq & 8) && u)) {
695272dc
PM
6614 return 1;
6615 }
6616 if ((src1_wide && (rn & 1)) ||
6617 (src2_wide && (rm & 1)) ||
6618 (!src2_wide && (rd & 1))) {
ad69471c 6619 return 1;
695272dc 6620 }
ad69471c 6621
4e624eda
PM
6622 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6623 * outside the loop below as it only performs a single pass.
6624 */
6625 if (op == 14 && size == 2) {
6626 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6627
d614a513 6628 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6629 return 1;
6630 }
6631 tcg_rn = tcg_temp_new_i64();
6632 tcg_rm = tcg_temp_new_i64();
6633 tcg_rd = tcg_temp_new_i64();
6634 neon_load_reg64(tcg_rn, rn);
6635 neon_load_reg64(tcg_rm, rm);
6636 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6637 neon_store_reg64(tcg_rd, rd);
6638 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6639 neon_store_reg64(tcg_rd, rd + 1);
6640 tcg_temp_free_i64(tcg_rn);
6641 tcg_temp_free_i64(tcg_rm);
6642 tcg_temp_free_i64(tcg_rd);
6643 return 0;
6644 }
6645
9ee6e8bb
PB
6646 /* Avoid overlapping operands. Wide source operands are
6647 always aligned so will never overlap with wide
6648 destinations in problematic ways. */
8f8e3aa4 6649 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6650 tmp = neon_load_reg(rm, 1);
6651 neon_store_scratch(2, tmp);
8f8e3aa4 6652 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6653 tmp = neon_load_reg(rn, 1);
6654 neon_store_scratch(2, tmp);
9ee6e8bb 6655 }
f764718d 6656 tmp3 = NULL;
9ee6e8bb 6657 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6658 if (src1_wide) {
6659 neon_load_reg64(cpu_V0, rn + pass);
f764718d 6660 tmp = NULL;
9ee6e8bb 6661 } else {
ad69471c 6662 if (pass == 1 && rd == rn) {
dd8fbd78 6663 tmp = neon_load_scratch(2);
9ee6e8bb 6664 } else {
ad69471c
PB
6665 tmp = neon_load_reg(rn, pass);
6666 }
6667 if (prewiden) {
6668 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6669 }
6670 }
ad69471c
PB
6671 if (src2_wide) {
6672 neon_load_reg64(cpu_V1, rm + pass);
f764718d 6673 tmp2 = NULL;
9ee6e8bb 6674 } else {
ad69471c 6675 if (pass == 1 && rd == rm) {
dd8fbd78 6676 tmp2 = neon_load_scratch(2);
9ee6e8bb 6677 } else {
ad69471c
PB
6678 tmp2 = neon_load_reg(rm, pass);
6679 }
6680 if (prewiden) {
6681 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6682 }
9ee6e8bb
PB
6683 }
6684 switch (op) {
6685 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6686 gen_neon_addl(size);
9ee6e8bb 6687 break;
79b0e534 6688 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6689 gen_neon_subl(size);
9ee6e8bb
PB
6690 break;
6691 case 5: case 7: /* VABAL, VABDL */
6692 switch ((size << 1) | u) {
ad69471c
PB
6693 case 0:
6694 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6695 break;
6696 case 1:
6697 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6698 break;
6699 case 2:
6700 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6701 break;
6702 case 3:
6703 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6704 break;
6705 case 4:
6706 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6707 break;
6708 case 5:
6709 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6710 break;
9ee6e8bb
PB
6711 default: abort();
6712 }
7d1b0095
PM
6713 tcg_temp_free_i32(tmp2);
6714 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6715 break;
6716 case 8: case 9: case 10: case 11: case 12: case 13:
6717 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6718 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6719 break;
6720 case 14: /* Polynomial VMULL */
e5ca24cb 6721 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6722 tcg_temp_free_i32(tmp2);
6723 tcg_temp_free_i32(tmp);
e5ca24cb 6724 break;
695272dc
PM
6725 default: /* 15 is RESERVED: caught earlier */
6726 abort();
9ee6e8bb 6727 }
ebcd88ce
PM
6728 if (op == 13) {
6729 /* VQDMULL */
6730 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6731 neon_store_reg64(cpu_V0, rd + pass);
6732 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6733 /* Accumulate. */
ebcd88ce 6734 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6735 switch (op) {
4dc064e6
PM
6736 case 10: /* VMLSL */
6737 gen_neon_negl(cpu_V0, size);
6738 /* Fall through */
6739 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6740 gen_neon_addl(size);
9ee6e8bb
PB
6741 break;
6742 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6743 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6744 if (op == 11) {
6745 gen_neon_negl(cpu_V0, size);
6746 }
ad69471c
PB
6747 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6748 break;
9ee6e8bb
PB
6749 default:
6750 abort();
6751 }
ad69471c 6752 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6753 } else if (op == 4 || op == 6) {
6754 /* Narrowing operation. */
7d1b0095 6755 tmp = tcg_temp_new_i32();
79b0e534 6756 if (!u) {
9ee6e8bb 6757 switch (size) {
ad69471c
PB
6758 case 0:
6759 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6760 break;
6761 case 1:
6762 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6763 break;
6764 case 2:
6765 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6766 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6767 break;
9ee6e8bb
PB
6768 default: abort();
6769 }
6770 } else {
6771 switch (size) {
ad69471c
PB
6772 case 0:
6773 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6774 break;
6775 case 1:
6776 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6777 break;
6778 case 2:
6779 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6780 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6781 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6782 break;
9ee6e8bb
PB
6783 default: abort();
6784 }
6785 }
ad69471c
PB
6786 if (pass == 0) {
6787 tmp3 = tmp;
6788 } else {
6789 neon_store_reg(rd, 0, tmp3);
6790 neon_store_reg(rd, 1, tmp);
6791 }
9ee6e8bb
PB
6792 } else {
6793 /* Write back the result. */
ad69471c 6794 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6795 }
6796 }
6797 } else {
3e3326df
PM
6798 /* Two registers and a scalar. NB that for ops of this form
6799 * the ARM ARM labels bit 24 as Q, but it is in our variable
6800 * 'u', not 'q'.
6801 */
6802 if (size == 0) {
6803 return 1;
6804 }
9ee6e8bb 6805 switch (op) {
9ee6e8bb 6806 case 1: /* Float VMLA scalar */
9ee6e8bb 6807 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6808 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6809 if (size == 1) {
6810 return 1;
6811 }
6812 /* fall through */
6813 case 0: /* Integer VMLA scalar */
6814 case 4: /* Integer VMLS scalar */
6815 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6816 case 12: /* VQDMULH scalar */
6817 case 13: /* VQRDMULH scalar */
3e3326df
PM
6818 if (u && ((rd | rn) & 1)) {
6819 return 1;
6820 }
dd8fbd78
FN
6821 tmp = neon_get_scalar(size, rm);
6822 neon_store_scratch(0, tmp);
9ee6e8bb 6823 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6824 tmp = neon_load_scratch(0);
6825 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6826 if (op == 12) {
6827 if (size == 1) {
02da0b2d 6828 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6829 } else {
02da0b2d 6830 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6831 }
6832 } else if (op == 13) {
6833 if (size == 1) {
02da0b2d 6834 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6835 } else {
02da0b2d 6836 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6837 }
6838 } else if (op & 1) {
aa47cfdd
PM
6839 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6840 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6841 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6842 } else {
6843 switch (size) {
dd8fbd78
FN
6844 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6845 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6846 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6847 default: abort();
9ee6e8bb
PB
6848 }
6849 }
7d1b0095 6850 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6851 if (op < 8) {
6852 /* Accumulate. */
dd8fbd78 6853 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6854 switch (op) {
6855 case 0:
dd8fbd78 6856 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6857 break;
6858 case 1:
aa47cfdd
PM
6859 {
6860 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6861 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6862 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6863 break;
aa47cfdd 6864 }
9ee6e8bb 6865 case 4:
dd8fbd78 6866 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6867 break;
6868 case 5:
aa47cfdd
PM
6869 {
6870 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6871 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6872 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6873 break;
aa47cfdd 6874 }
9ee6e8bb
PB
6875 default:
6876 abort();
6877 }
7d1b0095 6878 tcg_temp_free_i32(tmp2);
9ee6e8bb 6879 }
dd8fbd78 6880 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6881 }
6882 break;
9ee6e8bb 6883 case 3: /* VQDMLAL scalar */
9ee6e8bb 6884 case 7: /* VQDMLSL scalar */
9ee6e8bb 6885 case 11: /* VQDMULL scalar */
3e3326df 6886 if (u == 1) {
ad69471c 6887 return 1;
3e3326df
PM
6888 }
6889 /* fall through */
6890 case 2: /* VMLAL sclar */
6891 case 6: /* VMLSL scalar */
6892 case 10: /* VMULL scalar */
6893 if (rd & 1) {
6894 return 1;
6895 }
dd8fbd78 6896 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6897 /* We need a copy of tmp2 because gen_neon_mull
6898 * deletes it during pass 0. */
7d1b0095 6899 tmp4 = tcg_temp_new_i32();
c6067f04 6900 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6901 tmp3 = neon_load_reg(rn, 1);
ad69471c 6902
9ee6e8bb 6903 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6904 if (pass == 0) {
6905 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6906 } else {
dd8fbd78 6907 tmp = tmp3;
c6067f04 6908 tmp2 = tmp4;
9ee6e8bb 6909 }
ad69471c 6910 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6911 if (op != 11) {
6912 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6913 }
9ee6e8bb 6914 switch (op) {
4dc064e6
PM
6915 case 6:
6916 gen_neon_negl(cpu_V0, size);
6917 /* Fall through */
6918 case 2:
ad69471c 6919 gen_neon_addl(size);
9ee6e8bb
PB
6920 break;
6921 case 3: case 7:
ad69471c 6922 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6923 if (op == 7) {
6924 gen_neon_negl(cpu_V0, size);
6925 }
ad69471c 6926 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6927 break;
6928 case 10:
6929 /* no-op */
6930 break;
6931 case 11:
ad69471c 6932 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6933 break;
6934 default:
6935 abort();
6936 }
ad69471c 6937 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6938 }
dd8fbd78 6939
dd8fbd78 6940
9ee6e8bb
PB
6941 break;
6942 default: /* 14 and 15 are RESERVED */
6943 return 1;
6944 }
6945 }
6946 } else { /* size == 3 */
6947 if (!u) {
6948 /* Extract. */
9ee6e8bb 6949 imm = (insn >> 8) & 0xf;
ad69471c
PB
6950
6951 if (imm > 7 && !q)
6952 return 1;
6953
52579ea1
PM
6954 if (q && ((rd | rn | rm) & 1)) {
6955 return 1;
6956 }
6957
ad69471c
PB
6958 if (imm == 0) {
6959 neon_load_reg64(cpu_V0, rn);
6960 if (q) {
6961 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6962 }
ad69471c
PB
6963 } else if (imm == 8) {
6964 neon_load_reg64(cpu_V0, rn + 1);
6965 if (q) {
6966 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6967 }
ad69471c 6968 } else if (q) {
a7812ae4 6969 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6970 if (imm < 8) {
6971 neon_load_reg64(cpu_V0, rn);
a7812ae4 6972 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6973 } else {
6974 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6975 neon_load_reg64(tmp64, rm);
ad69471c
PB
6976 }
6977 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6978 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6979 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6980 if (imm < 8) {
6981 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6982 } else {
ad69471c
PB
6983 neon_load_reg64(cpu_V1, rm + 1);
6984 imm -= 8;
9ee6e8bb 6985 }
ad69471c 6986 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6987 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6988 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6989 tcg_temp_free_i64(tmp64);
ad69471c 6990 } else {
a7812ae4 6991 /* BUGFIX */
ad69471c 6992 neon_load_reg64(cpu_V0, rn);
a7812ae4 6993 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6994 neon_load_reg64(cpu_V1, rm);
a7812ae4 6995 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6996 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6997 }
6998 neon_store_reg64(cpu_V0, rd);
6999 if (q) {
7000 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
7001 }
7002 } else if ((insn & (1 << 11)) == 0) {
7003 /* Two register misc. */
7004 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
7005 size = (insn >> 18) & 3;
600b828c
PM
7006 /* UNDEF for unknown op values and bad op-size combinations */
7007 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
7008 return 1;
7009 }
fe8fcf3d
PM
7010 if (neon_2rm_is_v8_op(op) &&
7011 !arm_dc_feature(s, ARM_FEATURE_V8)) {
7012 return 1;
7013 }
fc2a9b37
PM
7014 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
7015 q && ((rm | rd) & 1)) {
7016 return 1;
7017 }
9ee6e8bb 7018 switch (op) {
600b828c 7019 case NEON_2RM_VREV64:
9ee6e8bb 7020 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
7021 tmp = neon_load_reg(rm, pass * 2);
7022 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 7023 switch (size) {
dd8fbd78
FN
7024 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7025 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
7026 case 2: /* no-op */ break;
7027 default: abort();
7028 }
dd8fbd78 7029 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 7030 if (size == 2) {
dd8fbd78 7031 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 7032 } else {
9ee6e8bb 7033 switch (size) {
dd8fbd78
FN
7034 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
7035 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
7036 default: abort();
7037 }
dd8fbd78 7038 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
7039 }
7040 }
7041 break;
600b828c
PM
7042 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
7043 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
7044 for (pass = 0; pass < q + 1; pass++) {
7045 tmp = neon_load_reg(rm, pass * 2);
7046 gen_neon_widen(cpu_V0, tmp, size, op & 1);
7047 tmp = neon_load_reg(rm, pass * 2 + 1);
7048 gen_neon_widen(cpu_V1, tmp, size, op & 1);
7049 switch (size) {
7050 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
7051 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
7052 case 2: tcg_gen_add_i64(CPU_V001); break;
7053 default: abort();
7054 }
600b828c 7055 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 7056 /* Accumulate. */
ad69471c
PB
7057 neon_load_reg64(cpu_V1, rd + pass);
7058 gen_neon_addl(size);
9ee6e8bb 7059 }
ad69471c 7060 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7061 }
7062 break;
600b828c 7063 case NEON_2RM_VTRN:
9ee6e8bb 7064 if (size == 2) {
a5a14945 7065 int n;
9ee6e8bb 7066 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7067 tmp = neon_load_reg(rm, n);
7068 tmp2 = neon_load_reg(rd, n + 1);
7069 neon_store_reg(rm, n, tmp2);
7070 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7071 }
7072 } else {
7073 goto elementwise;
7074 }
7075 break;
600b828c 7076 case NEON_2RM_VUZP:
02acedf9 7077 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7078 return 1;
9ee6e8bb
PB
7079 }
7080 break;
600b828c 7081 case NEON_2RM_VZIP:
d68a6f3a 7082 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7083 return 1;
9ee6e8bb
PB
7084 }
7085 break;
600b828c
PM
7086 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7087 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7088 if (rm & 1) {
7089 return 1;
7090 }
f764718d 7091 tmp2 = NULL;
9ee6e8bb 7092 for (pass = 0; pass < 2; pass++) {
ad69471c 7093 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7094 tmp = tcg_temp_new_i32();
600b828c
PM
7095 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7096 tmp, cpu_V0);
ad69471c
PB
7097 if (pass == 0) {
7098 tmp2 = tmp;
7099 } else {
7100 neon_store_reg(rd, 0, tmp2);
7101 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7102 }
9ee6e8bb
PB
7103 }
7104 break;
600b828c 7105 case NEON_2RM_VSHLL:
fc2a9b37 7106 if (q || (rd & 1)) {
9ee6e8bb 7107 return 1;
600b828c 7108 }
ad69471c
PB
7109 tmp = neon_load_reg(rm, 0);
7110 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7111 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7112 if (pass == 1)
7113 tmp = tmp2;
7114 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7115 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7116 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7117 }
7118 break;
600b828c 7119 case NEON_2RM_VCVT_F16_F32:
d614a513 7120 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7121 q || (rm & 1)) {
7122 return 1;
7123 }
7d1b0095
PM
7124 tmp = tcg_temp_new_i32();
7125 tmp2 = tcg_temp_new_i32();
60011498 7126 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7127 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7128 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7129 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7130 tcg_gen_shli_i32(tmp2, tmp2, 16);
7131 tcg_gen_or_i32(tmp2, tmp2, tmp);
7132 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7133 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7134 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7135 neon_store_reg(rd, 0, tmp2);
7d1b0095 7136 tmp2 = tcg_temp_new_i32();
2d981da7 7137 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7138 tcg_gen_shli_i32(tmp2, tmp2, 16);
7139 tcg_gen_or_i32(tmp2, tmp2, tmp);
7140 neon_store_reg(rd, 1, tmp2);
7d1b0095 7141 tcg_temp_free_i32(tmp);
60011498 7142 break;
600b828c 7143 case NEON_2RM_VCVT_F32_F16:
d614a513 7144 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7145 q || (rd & 1)) {
7146 return 1;
7147 }
7d1b0095 7148 tmp3 = tcg_temp_new_i32();
60011498
PB
7149 tmp = neon_load_reg(rm, 0);
7150 tmp2 = neon_load_reg(rm, 1);
7151 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7152 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7153 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7154 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7155 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7156 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7157 tcg_temp_free_i32(tmp);
60011498 7158 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7159 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7160 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7161 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7162 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7163 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7164 tcg_temp_free_i32(tmp2);
7165 tcg_temp_free_i32(tmp3);
60011498 7166 break;
9d935509 7167 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7168 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7169 || ((rm | rd) & 1)) {
7170 return 1;
7171 }
1a66ac61
RH
7172 ptr1 = vfp_reg_ptr(true, rd);
7173 ptr2 = vfp_reg_ptr(true, rm);
9d935509
AB
7174
7175 /* Bit 6 is the lowest opcode bit; it distinguishes between
7176 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7177 */
7178 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7179
7180 if (op == NEON_2RM_AESE) {
1a66ac61 7181 gen_helper_crypto_aese(ptr1, ptr2, tmp3);
9d935509 7182 } else {
1a66ac61 7183 gen_helper_crypto_aesmc(ptr1, ptr2, tmp3);
9d935509 7184 }
1a66ac61
RH
7185 tcg_temp_free_ptr(ptr1);
7186 tcg_temp_free_ptr(ptr2);
9d935509
AB
7187 tcg_temp_free_i32(tmp3);
7188 break;
f1ecb913 7189 case NEON_2RM_SHA1H:
d614a513 7190 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7191 || ((rm | rd) & 1)) {
7192 return 1;
7193 }
1a66ac61
RH
7194 ptr1 = vfp_reg_ptr(true, rd);
7195 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7196
1a66ac61 7197 gen_helper_crypto_sha1h(ptr1, ptr2);
f1ecb913 7198
1a66ac61
RH
7199 tcg_temp_free_ptr(ptr1);
7200 tcg_temp_free_ptr(ptr2);
f1ecb913
AB
7201 break;
7202 case NEON_2RM_SHA1SU1:
7203 if ((rm | rd) & 1) {
7204 return 1;
7205 }
7206 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7207 if (q) {
d614a513 7208 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7209 return 1;
7210 }
d614a513 7211 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7212 return 1;
7213 }
1a66ac61
RH
7214 ptr1 = vfp_reg_ptr(true, rd);
7215 ptr2 = vfp_reg_ptr(true, rm);
f1ecb913 7216 if (q) {
1a66ac61 7217 gen_helper_crypto_sha256su0(ptr1, ptr2);
f1ecb913 7218 } else {
1a66ac61 7219 gen_helper_crypto_sha1su1(ptr1, ptr2);
f1ecb913 7220 }
1a66ac61
RH
7221 tcg_temp_free_ptr(ptr1);
7222 tcg_temp_free_ptr(ptr2);
f1ecb913 7223 break;
9ee6e8bb
PB
7224 default:
7225 elementwise:
7226 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7227 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7228 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7229 neon_reg_offset(rm, pass));
f764718d 7230 tmp = NULL;
9ee6e8bb 7231 } else {
dd8fbd78 7232 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7233 }
7234 switch (op) {
600b828c 7235 case NEON_2RM_VREV32:
9ee6e8bb 7236 switch (size) {
dd8fbd78
FN
7237 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7238 case 1: gen_swap_half(tmp); break;
600b828c 7239 default: abort();
9ee6e8bb
PB
7240 }
7241 break;
600b828c 7242 case NEON_2RM_VREV16:
dd8fbd78 7243 gen_rev16(tmp);
9ee6e8bb 7244 break;
600b828c 7245 case NEON_2RM_VCLS:
9ee6e8bb 7246 switch (size) {
dd8fbd78
FN
7247 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7248 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7249 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7250 default: abort();
9ee6e8bb
PB
7251 }
7252 break;
600b828c 7253 case NEON_2RM_VCLZ:
9ee6e8bb 7254 switch (size) {
dd8fbd78
FN
7255 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7256 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7257 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7258 default: abort();
9ee6e8bb
PB
7259 }
7260 break;
600b828c 7261 case NEON_2RM_VCNT:
dd8fbd78 7262 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7263 break;
600b828c 7264 case NEON_2RM_VMVN:
dd8fbd78 7265 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7266 break;
600b828c 7267 case NEON_2RM_VQABS:
9ee6e8bb 7268 switch (size) {
02da0b2d
PM
7269 case 0:
7270 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7271 break;
7272 case 1:
7273 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7274 break;
7275 case 2:
7276 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7277 break;
600b828c 7278 default: abort();
9ee6e8bb
PB
7279 }
7280 break;
600b828c 7281 case NEON_2RM_VQNEG:
9ee6e8bb 7282 switch (size) {
02da0b2d
PM
7283 case 0:
7284 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7285 break;
7286 case 1:
7287 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7288 break;
7289 case 2:
7290 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7291 break;
600b828c 7292 default: abort();
9ee6e8bb
PB
7293 }
7294 break;
600b828c 7295 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7296 tmp2 = tcg_const_i32(0);
9ee6e8bb 7297 switch(size) {
dd8fbd78
FN
7298 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7299 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7300 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7301 default: abort();
9ee6e8bb 7302 }
39d5492a 7303 tcg_temp_free_i32(tmp2);
600b828c 7304 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7305 tcg_gen_not_i32(tmp, tmp);
600b828c 7306 }
9ee6e8bb 7307 break;
600b828c 7308 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7309 tmp2 = tcg_const_i32(0);
9ee6e8bb 7310 switch(size) {
dd8fbd78
FN
7311 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7312 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7313 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7314 default: abort();
9ee6e8bb 7315 }
39d5492a 7316 tcg_temp_free_i32(tmp2);
600b828c 7317 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7318 tcg_gen_not_i32(tmp, tmp);
600b828c 7319 }
9ee6e8bb 7320 break;
600b828c 7321 case NEON_2RM_VCEQ0:
dd8fbd78 7322 tmp2 = tcg_const_i32(0);
9ee6e8bb 7323 switch(size) {
dd8fbd78
FN
7324 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7325 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7326 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7327 default: abort();
9ee6e8bb 7328 }
39d5492a 7329 tcg_temp_free_i32(tmp2);
9ee6e8bb 7330 break;
600b828c 7331 case NEON_2RM_VABS:
9ee6e8bb 7332 switch(size) {
dd8fbd78
FN
7333 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7334 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7335 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7336 default: abort();
9ee6e8bb
PB
7337 }
7338 break;
600b828c 7339 case NEON_2RM_VNEG:
dd8fbd78
FN
7340 tmp2 = tcg_const_i32(0);
7341 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7342 tcg_temp_free_i32(tmp2);
9ee6e8bb 7343 break;
600b828c 7344 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7345 {
7346 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7347 tmp2 = tcg_const_i32(0);
aa47cfdd 7348 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7349 tcg_temp_free_i32(tmp2);
aa47cfdd 7350 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7351 break;
aa47cfdd 7352 }
600b828c 7353 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7354 {
7355 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7356 tmp2 = tcg_const_i32(0);
aa47cfdd 7357 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7358 tcg_temp_free_i32(tmp2);
aa47cfdd 7359 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7360 break;
aa47cfdd 7361 }
600b828c 7362 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7363 {
7364 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7365 tmp2 = tcg_const_i32(0);
aa47cfdd 7366 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7367 tcg_temp_free_i32(tmp2);
aa47cfdd 7368 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7369 break;
aa47cfdd 7370 }
600b828c 7371 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7372 {
7373 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7374 tmp2 = tcg_const_i32(0);
aa47cfdd 7375 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7376 tcg_temp_free_i32(tmp2);
aa47cfdd 7377 tcg_temp_free_ptr(fpstatus);
0e326109 7378 break;
aa47cfdd 7379 }
600b828c 7380 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7381 {
7382 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7383 tmp2 = tcg_const_i32(0);
aa47cfdd 7384 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7385 tcg_temp_free_i32(tmp2);
aa47cfdd 7386 tcg_temp_free_ptr(fpstatus);
0e326109 7387 break;
aa47cfdd 7388 }
600b828c 7389 case NEON_2RM_VABS_F:
4373f3ce 7390 gen_vfp_abs(0);
9ee6e8bb 7391 break;
600b828c 7392 case NEON_2RM_VNEG_F:
4373f3ce 7393 gen_vfp_neg(0);
9ee6e8bb 7394 break;
600b828c 7395 case NEON_2RM_VSWP:
dd8fbd78
FN
7396 tmp2 = neon_load_reg(rd, pass);
7397 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7398 break;
600b828c 7399 case NEON_2RM_VTRN:
dd8fbd78 7400 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7401 switch (size) {
dd8fbd78
FN
7402 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7403 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7404 default: abort();
9ee6e8bb 7405 }
dd8fbd78 7406 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7407 break;
34f7b0a2
WN
7408 case NEON_2RM_VRINTN:
7409 case NEON_2RM_VRINTA:
7410 case NEON_2RM_VRINTM:
7411 case NEON_2RM_VRINTP:
7412 case NEON_2RM_VRINTZ:
7413 {
7414 TCGv_i32 tcg_rmode;
7415 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7416 int rmode;
7417
7418 if (op == NEON_2RM_VRINTZ) {
7419 rmode = FPROUNDING_ZERO;
7420 } else {
7421 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7422 }
7423
7424 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7425 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7426 cpu_env);
7427 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7428 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7429 cpu_env);
7430 tcg_temp_free_ptr(fpstatus);
7431 tcg_temp_free_i32(tcg_rmode);
7432 break;
7433 }
2ce70625
WN
7434 case NEON_2RM_VRINTX:
7435 {
7436 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7437 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7438 tcg_temp_free_ptr(fpstatus);
7439 break;
7440 }
901ad525
WN
7441 case NEON_2RM_VCVTAU:
7442 case NEON_2RM_VCVTAS:
7443 case NEON_2RM_VCVTNU:
7444 case NEON_2RM_VCVTNS:
7445 case NEON_2RM_VCVTPU:
7446 case NEON_2RM_VCVTPS:
7447 case NEON_2RM_VCVTMU:
7448 case NEON_2RM_VCVTMS:
7449 {
7450 bool is_signed = !extract32(insn, 7, 1);
7451 TCGv_ptr fpst = get_fpstatus_ptr(1);
7452 TCGv_i32 tcg_rmode, tcg_shift;
7453 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7454
7455 tcg_shift = tcg_const_i32(0);
7456 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7457 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7458 cpu_env);
7459
7460 if (is_signed) {
7461 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7462 tcg_shift, fpst);
7463 } else {
7464 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7465 tcg_shift, fpst);
7466 }
7467
7468 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7469 cpu_env);
7470 tcg_temp_free_i32(tcg_rmode);
7471 tcg_temp_free_i32(tcg_shift);
7472 tcg_temp_free_ptr(fpst);
7473 break;
7474 }
600b828c 7475 case NEON_2RM_VRECPE:
b6d4443a
AB
7476 {
7477 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7478 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7479 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7480 break;
b6d4443a 7481 }
600b828c 7482 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7483 {
7484 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7485 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7486 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7487 break;
c2fb418e 7488 }
600b828c 7489 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7490 {
7491 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7492 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7493 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7494 break;
b6d4443a 7495 }
600b828c 7496 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7497 {
7498 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7499 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7500 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7501 break;
c2fb418e 7502 }
600b828c 7503 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7504 gen_vfp_sito(0, 1);
9ee6e8bb 7505 break;
600b828c 7506 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7507 gen_vfp_uito(0, 1);
9ee6e8bb 7508 break;
600b828c 7509 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7510 gen_vfp_tosiz(0, 1);
9ee6e8bb 7511 break;
600b828c 7512 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7513 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7514 break;
7515 default:
600b828c
PM
7516 /* Reserved op values were caught by the
7517 * neon_2rm_sizes[] check earlier.
7518 */
7519 abort();
9ee6e8bb 7520 }
600b828c 7521 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7522 tcg_gen_st_f32(cpu_F0s, cpu_env,
7523 neon_reg_offset(rd, pass));
9ee6e8bb 7524 } else {
dd8fbd78 7525 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7526 }
7527 }
7528 break;
7529 }
7530 } else if ((insn & (1 << 10)) == 0) {
7531 /* VTBL, VTBX. */
56907d77
PM
7532 int n = ((insn >> 8) & 3) + 1;
7533 if ((rn + n) > 32) {
7534 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7535 * helper function running off the end of the register file.
7536 */
7537 return 1;
7538 }
7539 n <<= 3;
9ee6e8bb 7540 if (insn & (1 << 6)) {
8f8e3aa4 7541 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7542 } else {
7d1b0095 7543 tmp = tcg_temp_new_i32();
8f8e3aa4 7544 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7545 }
8f8e3aa4 7546 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7547 tmp4 = tcg_const_i32(rn);
7548 tmp5 = tcg_const_i32(n);
9ef39277 7549 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7550 tcg_temp_free_i32(tmp);
9ee6e8bb 7551 if (insn & (1 << 6)) {
8f8e3aa4 7552 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7553 } else {
7d1b0095 7554 tmp = tcg_temp_new_i32();
8f8e3aa4 7555 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7556 }
8f8e3aa4 7557 tmp3 = neon_load_reg(rm, 1);
9ef39277 7558 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7559 tcg_temp_free_i32(tmp5);
7560 tcg_temp_free_i32(tmp4);
8f8e3aa4 7561 neon_store_reg(rd, 0, tmp2);
3018f259 7562 neon_store_reg(rd, 1, tmp3);
7d1b0095 7563 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7564 } else if ((insn & 0x380) == 0) {
7565 /* VDUP */
133da6aa
JR
7566 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7567 return 1;
7568 }
9ee6e8bb 7569 if (insn & (1 << 19)) {
dd8fbd78 7570 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7571 } else {
dd8fbd78 7572 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7573 }
7574 if (insn & (1 << 16)) {
dd8fbd78 7575 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7576 } else if (insn & (1 << 17)) {
7577 if ((insn >> 18) & 1)
dd8fbd78 7578 gen_neon_dup_high16(tmp);
9ee6e8bb 7579 else
dd8fbd78 7580 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7581 }
7582 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7583 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7584 tcg_gen_mov_i32(tmp2, tmp);
7585 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7586 }
7d1b0095 7587 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7588 } else {
7589 return 1;
7590 }
7591 }
7592 }
7593 return 0;
7594}
7595
/* Decode a coprocessor (cp14/cp15 system register, or XScale/iwMMXt)
 * instruction and emit the TCG ops for it.
 *
 * @s: disassembly context for the instruction being translated
 * @insn: the 32-bit instruction word
 *
 * Returns 0 if the instruction was handled, 1 to make the caller
 * raise UNDEF (unknown register, bad permissions, CDP, etc).
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates access to cp0/cp1 on XScale; bit clear -> UNDEF */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;   /* bit 25 clear: MCRR/MRRC (64-bit) */
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-coordinate fields; the 64-bit (MCRR/MRRC)
     * encoding has no crn/opc2 and carries the second GPR in bits [19:16].
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec bits and PC before the helper call, since the
             * helper may raise an exception (PC points at this insn: pc - 4).
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->base.is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers must be bracketed by gen_io_start/gen_io_end
         * when icount is in use.
         */
        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value: low half -> rt, high half -> rt2 */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7847
5e3f878a
PB
7848
7849/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7850static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7851{
39d5492a 7852 TCGv_i32 tmp;
7d1b0095 7853 tmp = tcg_temp_new_i32();
ecc7b3aa 7854 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7855 store_reg(s, rlow, tmp);
7d1b0095 7856 tmp = tcg_temp_new_i32();
5e3f878a 7857 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7858 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7859 store_reg(s, rhigh, tmp);
7860}
7861
7862/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7863static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7864{
a7812ae4 7865 TCGv_i64 tmp;
39d5492a 7866 TCGv_i32 tmp2;
5e3f878a 7867
36aa55dc 7868 /* Load value and extend to 64 bits. */
a7812ae4 7869 tmp = tcg_temp_new_i64();
5e3f878a
PB
7870 tmp2 = load_reg(s, rlow);
7871 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7872 tcg_temp_free_i32(tmp2);
5e3f878a 7873 tcg_gen_add_i64(val, val, tmp);
b75263d6 7874 tcg_temp_free_i64(tmp);
5e3f878a
PB
7875}
7876
7877/* load and add a 64-bit value from a register pair. */
a7812ae4 7878static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7879{
a7812ae4 7880 TCGv_i64 tmp;
39d5492a
PM
7881 TCGv_i32 tmpl;
7882 TCGv_i32 tmph;
5e3f878a
PB
7883
7884 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7885 tmpl = load_reg(s, rlow);
7886 tmph = load_reg(s, rhigh);
a7812ae4 7887 tmp = tcg_temp_new_i64();
36aa55dc 7888 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7889 tcg_temp_free_i32(tmpl);
7890 tcg_temp_free_i32(tmph);
5e3f878a 7891 tcg_gen_add_i64(val, val, tmp);
b75263d6 7892 tcg_temp_free_i64(tmp);
5e3f878a
PB
7893}
7894
c9f10124 7895/* Set N and Z flags from hi|lo. */
39d5492a 7896static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7897{
c9f10124
RH
7898 tcg_gen_mov_i32(cpu_NF, hi);
7899 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7900}
7901
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */

/* Emit code for LDREX/LDREXB/LDREXH/LDREXD.
 *
 * @rt:   destination register (low word for size == 3)
 * @rt2:  second destination register (only used when size == 3)
 * @addr: 32-bit virtual address to load from
 * @size: log2 of the access size; 3 means a 64-bit (register-pair) access
 *
 * Records the loaded value in cpu_exclusive_val and the address in
 * cpu_exclusive_addr for the matching gen_store_exclusive() to check.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        /* Split halves so that tmp always gets the word for Rt */
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        /* Remember the loaded value, zero-extended to 64 bits */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the exclusive monitor with the access address */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7952
/* Emit code for CLREX: clear the local exclusive monitor by setting
 * cpu_exclusive_addr to -1, a value no real access address can match.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7957
/* Emit code for STREX/STREXB/STREXH/STREXD.
 *
 * @rd:   status register; receives 0 on success, 1 on failure
 * @rt:   source register (low word for size == 3)
 * @rt2:  second source register (only used when size == 3)
 * @addr: 32-bit virtual address to store to
 * @size: log2 of the access size; 3 means a 64-bit (register-pair) access
 *
 * The address is compared against cpu_exclusive_addr manually; the value
 * comparison against cpu_exclusive_val is folded into an atomic cmpxchg.
 * In all cases the exclusive monitor is cleared afterwards.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address check: branch straight to the failure path on mismatch */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();      /* will hold the 0/1 status result */
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        /* Store n64 only if [taddr] still equals cpu_exclusive_val;
         * o64 receives the value that was in memory either way.
         */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* Status: 0 if the old value matched (store happened), else 1 */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Clear the exclusive monitor whether the store succeeded or not */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 8031
81465888
PM
8032/* gen_srs:
8033 * @env: CPUARMState
8034 * @s: DisasContext
8035 * @mode: mode field from insn (which stack to store to)
8036 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
8037 * @writeback: true if writeback bit set
8038 *
8039 * Generate code for the SRS (Store Return State) insn.
8040 */
8041static void gen_srs(DisasContext *s,
8042 uint32_t mode, uint32_t amode, bool writeback)
8043{
8044 int32_t offset;
cbc0326b
PM
8045 TCGv_i32 addr, tmp;
8046 bool undef = false;
8047
8048 /* SRS is:
8049 * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
ba63cf47 8050 * and specified mode is monitor mode
cbc0326b
PM
8051 * - UNDEFINED in Hyp mode
8052 * - UNPREDICTABLE in User or System mode
8053 * - UNPREDICTABLE if the specified mode is:
8054 * -- not implemented
8055 * -- not a valid mode number
8056 * -- a mode that's at a higher exception level
8057 * -- Monitor, if we are Non-secure
f01377f5 8058 * For the UNPREDICTABLE cases we choose to UNDEF.
cbc0326b 8059 */
ba63cf47 8060 if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
cbc0326b
PM
8061 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
8062 return;
8063 }
8064
8065 if (s->current_el == 0 || s->current_el == 2) {
8066 undef = true;
8067 }
8068
8069 switch (mode) {
8070 case ARM_CPU_MODE_USR:
8071 case ARM_CPU_MODE_FIQ:
8072 case ARM_CPU_MODE_IRQ:
8073 case ARM_CPU_MODE_SVC:
8074 case ARM_CPU_MODE_ABT:
8075 case ARM_CPU_MODE_UND:
8076 case ARM_CPU_MODE_SYS:
8077 break;
8078 case ARM_CPU_MODE_HYP:
8079 if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
8080 undef = true;
8081 }
8082 break;
8083 case ARM_CPU_MODE_MON:
8084 /* No need to check specifically for "are we non-secure" because
8085 * we've already made EL0 UNDEF and handled the trap for S-EL1;
8086 * so if this isn't EL3 then we must be non-secure.
8087 */
8088 if (s->current_el != 3) {
8089 undef = true;
8090 }
8091 break;
8092 default:
8093 undef = true;
8094 }
8095
8096 if (undef) {
8097 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
8098 default_exception_el(s));
8099 return;
8100 }
8101
8102 addr = tcg_temp_new_i32();
8103 tmp = tcg_const_i32(mode);
f01377f5
PM
8104 /* get_r13_banked() will raise an exception if called from System mode */
8105 gen_set_condexec(s);
8106 gen_set_pc_im(s, s->pc - 4);
81465888
PM
8107 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8108 tcg_temp_free_i32(tmp);
8109 switch (amode) {
8110 case 0: /* DA */
8111 offset = -4;
8112 break;
8113 case 1: /* IA */
8114 offset = 0;
8115 break;
8116 case 2: /* DB */
8117 offset = -8;
8118 break;
8119 case 3: /* IB */
8120 offset = 4;
8121 break;
8122 default:
8123 abort();
8124 }
8125 tcg_gen_addi_i32(addr, addr, offset);
8126 tmp = load_reg(s, 14);
12dcc321 8127 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8128 tcg_temp_free_i32(tmp);
81465888
PM
8129 tmp = load_cpu_field(spsr);
8130 tcg_gen_addi_i32(addr, addr, 4);
12dcc321 8131 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8132 tcg_temp_free_i32(tmp);
81465888
PM
8133 if (writeback) {
8134 switch (amode) {
8135 case 0:
8136 offset = -8;
8137 break;
8138 case 1:
8139 offset = 4;
8140 break;
8141 case 2:
8142 offset = -4;
8143 break;
8144 case 3:
8145 offset = 0;
8146 break;
8147 default:
8148 abort();
8149 }
8150 tcg_gen_addi_i32(addr, addr, offset);
8151 tmp = tcg_const_i32(mode);
8152 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8153 tcg_temp_free_i32(tmp);
8154 }
8155 tcg_temp_free_i32(addr);
dcba3a8d 8156 s->base.is_jmp = DISAS_UPDATE;
81465888
PM
8157}
8158
f4df2210 8159static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8160{
f4df2210 8161 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8162 TCGv_i32 tmp;
8163 TCGv_i32 tmp2;
8164 TCGv_i32 tmp3;
8165 TCGv_i32 addr;
a7812ae4 8166 TCGv_i64 tmp64;
9ee6e8bb 8167
e13886e3
PM
8168 /* M variants do not implement ARM mode; this must raise the INVSTATE
8169 * UsageFault exception.
8170 */
b53d8923 8171 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8172 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8173 default_exception_el(s));
8174 return;
b53d8923 8175 }
9ee6e8bb
PB
8176 cond = insn >> 28;
8177 if (cond == 0xf){
be5e7a76
DES
8178 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8179 * choose to UNDEF. In ARMv5 and above the space is used
8180 * for miscellaneous unconditional instructions.
8181 */
8182 ARCH(5);
8183
9ee6e8bb
PB
8184 /* Unconditional instructions. */
8185 if (((insn >> 25) & 7) == 1) {
8186 /* NEON Data processing. */
d614a513 8187 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8188 goto illegal_op;
d614a513 8189 }
9ee6e8bb 8190
7dcc1f89 8191 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8192 goto illegal_op;
7dcc1f89 8193 }
9ee6e8bb
PB
8194 return;
8195 }
8196 if ((insn & 0x0f100000) == 0x04000000) {
8197 /* NEON load/store. */
d614a513 8198 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8199 goto illegal_op;
d614a513 8200 }
9ee6e8bb 8201
7dcc1f89 8202 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8203 goto illegal_op;
7dcc1f89 8204 }
9ee6e8bb
PB
8205 return;
8206 }
6a57f3eb
WN
8207 if ((insn & 0x0f000e10) == 0x0e000a00) {
8208 /* VFP. */
7dcc1f89 8209 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8210 goto illegal_op;
8211 }
8212 return;
8213 }
3d185e5d
PM
8214 if (((insn & 0x0f30f000) == 0x0510f000) ||
8215 ((insn & 0x0f30f010) == 0x0710f000)) {
8216 if ((insn & (1 << 22)) == 0) {
8217 /* PLDW; v7MP */
d614a513 8218 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8219 goto illegal_op;
8220 }
8221 }
8222 /* Otherwise PLD; v5TE+ */
be5e7a76 8223 ARCH(5TE);
3d185e5d
PM
8224 return;
8225 }
8226 if (((insn & 0x0f70f000) == 0x0450f000) ||
8227 ((insn & 0x0f70f010) == 0x0650f000)) {
8228 ARCH(7);
8229 return; /* PLI; V7 */
8230 }
8231 if (((insn & 0x0f700000) == 0x04100000) ||
8232 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8233 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8234 goto illegal_op;
8235 }
8236 return; /* v7MP: Unallocated memory hint: must NOP */
8237 }
8238
8239 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8240 ARCH(6);
8241 /* setend */
9886ecdf
PB
8242 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8243 gen_helper_setend(cpu_env);
dcba3a8d 8244 s->base.is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8245 }
8246 return;
8247 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8248 switch ((insn >> 4) & 0xf) {
8249 case 1: /* clrex */
8250 ARCH(6K);
426f5abc 8251 gen_clrex(s);
9ee6e8bb
PB
8252 return;
8253 case 4: /* dsb */
8254 case 5: /* dmb */
9ee6e8bb 8255 ARCH(7);
61e4c432 8256 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8257 return;
6df99dec
SS
8258 case 6: /* isb */
8259 /* We need to break the TB after this insn to execute
8260 * self-modifying code correctly and also to take
8261 * any pending interrupts immediately.
8262 */
0b609cc1 8263 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 8264 return;
9ee6e8bb
PB
8265 default:
8266 goto illegal_op;
8267 }
8268 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8269 /* srs */
81465888
PM
8270 ARCH(6);
8271 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8272 return;
ea825eee 8273 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8274 /* rfe */
c67b6b71 8275 int32_t offset;
9ee6e8bb
PB
8276 if (IS_USER(s))
8277 goto illegal_op;
8278 ARCH(6);
8279 rn = (insn >> 16) & 0xf;
b0109805 8280 addr = load_reg(s, rn);
9ee6e8bb
PB
8281 i = (insn >> 23) & 3;
8282 switch (i) {
b0109805 8283 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8284 case 1: offset = 0; break; /* IA */
8285 case 2: offset = -8; break; /* DB */
b0109805 8286 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8287 default: abort();
8288 }
8289 if (offset)
b0109805
PB
8290 tcg_gen_addi_i32(addr, addr, offset);
8291 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8292 tmp = tcg_temp_new_i32();
12dcc321 8293 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8294 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8295 tmp2 = tcg_temp_new_i32();
12dcc321 8296 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8297 if (insn & (1 << 21)) {
8298 /* Base writeback. */
8299 switch (i) {
b0109805 8300 case 0: offset = -8; break;
c67b6b71
FN
8301 case 1: offset = 4; break;
8302 case 2: offset = -4; break;
b0109805 8303 case 3: offset = 0; break;
9ee6e8bb
PB
8304 default: abort();
8305 }
8306 if (offset)
b0109805
PB
8307 tcg_gen_addi_i32(addr, addr, offset);
8308 store_reg(s, rn, addr);
8309 } else {
7d1b0095 8310 tcg_temp_free_i32(addr);
9ee6e8bb 8311 }
b0109805 8312 gen_rfe(s, tmp, tmp2);
c67b6b71 8313 return;
9ee6e8bb
PB
8314 } else if ((insn & 0x0e000000) == 0x0a000000) {
8315 /* branch link and change to thumb (blx <offset>) */
8316 int32_t offset;
8317
8318 val = (uint32_t)s->pc;
7d1b0095 8319 tmp = tcg_temp_new_i32();
d9ba4830
PB
8320 tcg_gen_movi_i32(tmp, val);
8321 store_reg(s, 14, tmp);
9ee6e8bb
PB
8322 /* Sign-extend the 24-bit offset */
8323 offset = (((int32_t)insn) << 8) >> 8;
8324 /* offset * 4 + bit24 * 2 + (thumb bit) */
8325 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8326 /* pipeline offset */
8327 val += 4;
be5e7a76 8328 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8329 gen_bx_im(s, val);
9ee6e8bb
PB
8330 return;
8331 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8332 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8333 /* iWMMXt register transfer. */
c0f4af17 8334 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8335 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8336 return;
c0f4af17
PM
8337 }
8338 }
9ee6e8bb
PB
8339 }
8340 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8341 /* Coprocessor double register transfer. */
be5e7a76 8342 ARCH(5TE);
9ee6e8bb
PB
8343 } else if ((insn & 0x0f000010) == 0x0e000010) {
8344 /* Additional coprocessor register transfer. */
7997d92f 8345 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8346 uint32_t mask;
8347 uint32_t val;
8348 /* cps (privileged) */
8349 if (IS_USER(s))
8350 return;
8351 mask = val = 0;
8352 if (insn & (1 << 19)) {
8353 if (insn & (1 << 8))
8354 mask |= CPSR_A;
8355 if (insn & (1 << 7))
8356 mask |= CPSR_I;
8357 if (insn & (1 << 6))
8358 mask |= CPSR_F;
8359 if (insn & (1 << 18))
8360 val |= mask;
8361 }
7997d92f 8362 if (insn & (1 << 17)) {
9ee6e8bb
PB
8363 mask |= CPSR_M;
8364 val |= (insn & 0x1f);
8365 }
8366 if (mask) {
2fbac54b 8367 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8368 }
8369 return;
8370 }
8371 goto illegal_op;
8372 }
8373 if (cond != 0xe) {
8374 /* if not always execute, we generate a conditional jump to
8375 next instruction */
8376 s->condlabel = gen_new_label();
39fb730a 8377 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8378 s->condjmp = 1;
8379 }
8380 if ((insn & 0x0f900000) == 0x03000000) {
8381 if ((insn & (1 << 21)) == 0) {
8382 ARCH(6T2);
8383 rd = (insn >> 12) & 0xf;
8384 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8385 if ((insn & (1 << 22)) == 0) {
8386 /* MOVW */
7d1b0095 8387 tmp = tcg_temp_new_i32();
5e3f878a 8388 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8389 } else {
8390 /* MOVT */
5e3f878a 8391 tmp = load_reg(s, rd);
86831435 8392 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8393 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8394 }
5e3f878a 8395 store_reg(s, rd, tmp);
9ee6e8bb
PB
8396 } else {
8397 if (((insn >> 12) & 0xf) != 0xf)
8398 goto illegal_op;
8399 if (((insn >> 16) & 0xf) == 0) {
8400 gen_nop_hint(s, insn & 0xff);
8401 } else {
8402 /* CPSR = immediate */
8403 val = insn & 0xff;
8404 shift = ((insn >> 8) & 0xf) * 2;
8405 if (shift)
8406 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8407 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8408 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8409 i, val)) {
9ee6e8bb 8410 goto illegal_op;
7dcc1f89 8411 }
9ee6e8bb
PB
8412 }
8413 }
8414 } else if ((insn & 0x0f900000) == 0x01000000
8415 && (insn & 0x00000090) != 0x00000090) {
8416 /* miscellaneous instructions */
8417 op1 = (insn >> 21) & 3;
8418 sh = (insn >> 4) & 0xf;
8419 rm = insn & 0xf;
8420 switch (sh) {
8bfd0550
PM
8421 case 0x0: /* MSR, MRS */
8422 if (insn & (1 << 9)) {
8423 /* MSR (banked) and MRS (banked) */
8424 int sysm = extract32(insn, 16, 4) |
8425 (extract32(insn, 8, 1) << 4);
8426 int r = extract32(insn, 22, 1);
8427
8428 if (op1 & 1) {
8429 /* MSR (banked) */
8430 gen_msr_banked(s, r, sysm, rm);
8431 } else {
8432 /* MRS (banked) */
8433 int rd = extract32(insn, 12, 4);
8434
8435 gen_mrs_banked(s, r, sysm, rd);
8436 }
8437 break;
8438 }
8439
8440 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8441 if (op1 & 1) {
8442 /* PSR = reg */
2fbac54b 8443 tmp = load_reg(s, rm);
9ee6e8bb 8444 i = ((op1 & 2) != 0);
7dcc1f89 8445 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8446 goto illegal_op;
8447 } else {
8448 /* reg = PSR */
8449 rd = (insn >> 12) & 0xf;
8450 if (op1 & 2) {
8451 if (IS_USER(s))
8452 goto illegal_op;
d9ba4830 8453 tmp = load_cpu_field(spsr);
9ee6e8bb 8454 } else {
7d1b0095 8455 tmp = tcg_temp_new_i32();
9ef39277 8456 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8457 }
d9ba4830 8458 store_reg(s, rd, tmp);
9ee6e8bb
PB
8459 }
8460 break;
8461 case 0x1:
8462 if (op1 == 1) {
8463 /* branch/exchange thumb (bx). */
be5e7a76 8464 ARCH(4T);
d9ba4830
PB
8465 tmp = load_reg(s, rm);
8466 gen_bx(s, tmp);
9ee6e8bb
PB
8467 } else if (op1 == 3) {
8468 /* clz */
be5e7a76 8469 ARCH(5);
9ee6e8bb 8470 rd = (insn >> 12) & 0xf;
1497c961 8471 tmp = load_reg(s, rm);
7539a012 8472 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8473 store_reg(s, rd, tmp);
9ee6e8bb
PB
8474 } else {
8475 goto illegal_op;
8476 }
8477 break;
8478 case 0x2:
8479 if (op1 == 1) {
8480 ARCH(5J); /* bxj */
8481 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8482 tmp = load_reg(s, rm);
8483 gen_bx(s, tmp);
9ee6e8bb
PB
8484 } else {
8485 goto illegal_op;
8486 }
8487 break;
8488 case 0x3:
8489 if (op1 != 1)
8490 goto illegal_op;
8491
be5e7a76 8492 ARCH(5);
9ee6e8bb 8493 /* branch link/exchange thumb (blx) */
d9ba4830 8494 tmp = load_reg(s, rm);
7d1b0095 8495 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8496 tcg_gen_movi_i32(tmp2, s->pc);
8497 store_reg(s, 14, tmp2);
8498 gen_bx(s, tmp);
9ee6e8bb 8499 break;
eb0ecd5a
WN
8500 case 0x4:
8501 {
8502 /* crc32/crc32c */
8503 uint32_t c = extract32(insn, 8, 4);
8504
8505 /* Check this CPU supports ARMv8 CRC instructions.
8506 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8507 * Bits 8, 10 and 11 should be zero.
8508 */
d614a513 8509 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8510 (c & 0xd) != 0) {
8511 goto illegal_op;
8512 }
8513
8514 rn = extract32(insn, 16, 4);
8515 rd = extract32(insn, 12, 4);
8516
8517 tmp = load_reg(s, rn);
8518 tmp2 = load_reg(s, rm);
aa633469
PM
8519 if (op1 == 0) {
8520 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8521 } else if (op1 == 1) {
8522 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8523 }
eb0ecd5a
WN
8524 tmp3 = tcg_const_i32(1 << op1);
8525 if (c & 0x2) {
8526 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8527 } else {
8528 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8529 }
8530 tcg_temp_free_i32(tmp2);
8531 tcg_temp_free_i32(tmp3);
8532 store_reg(s, rd, tmp);
8533 break;
8534 }
9ee6e8bb 8535 case 0x5: /* saturating add/subtract */
be5e7a76 8536 ARCH(5TE);
9ee6e8bb
PB
8537 rd = (insn >> 12) & 0xf;
8538 rn = (insn >> 16) & 0xf;
b40d0353 8539 tmp = load_reg(s, rm);
5e3f878a 8540 tmp2 = load_reg(s, rn);
9ee6e8bb 8541 if (op1 & 2)
9ef39277 8542 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8543 if (op1 & 1)
9ef39277 8544 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8545 else
9ef39277 8546 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8547 tcg_temp_free_i32(tmp2);
5e3f878a 8548 store_reg(s, rd, tmp);
9ee6e8bb 8549 break;
49e14940 8550 case 7:
d4a2dc67
PM
8551 {
8552 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8553 switch (op1) {
19a6e31c
PM
8554 case 0:
8555 /* HLT */
8556 gen_hlt(s, imm16);
8557 break;
37e6456e
PM
8558 case 1:
8559 /* bkpt */
8560 ARCH(5);
8561 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8562 syn_aa32_bkpt(imm16, false),
8563 default_exception_el(s));
37e6456e
PM
8564 break;
8565 case 2:
8566 /* Hypervisor call (v7) */
8567 ARCH(7);
8568 if (IS_USER(s)) {
8569 goto illegal_op;
8570 }
8571 gen_hvc(s, imm16);
8572 break;
8573 case 3:
8574 /* Secure monitor call (v6+) */
8575 ARCH(6K);
8576 if (IS_USER(s)) {
8577 goto illegal_op;
8578 }
8579 gen_smc(s);
8580 break;
8581 default:
19a6e31c 8582 g_assert_not_reached();
49e14940 8583 }
9ee6e8bb 8584 break;
d4a2dc67 8585 }
9ee6e8bb
PB
8586 case 0x8: /* signed multiply */
8587 case 0xa:
8588 case 0xc:
8589 case 0xe:
be5e7a76 8590 ARCH(5TE);
9ee6e8bb
PB
8591 rs = (insn >> 8) & 0xf;
8592 rn = (insn >> 12) & 0xf;
8593 rd = (insn >> 16) & 0xf;
8594 if (op1 == 1) {
8595 /* (32 * 16) >> 16 */
5e3f878a
PB
8596 tmp = load_reg(s, rm);
8597 tmp2 = load_reg(s, rs);
9ee6e8bb 8598 if (sh & 4)
5e3f878a 8599 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8600 else
5e3f878a 8601 gen_sxth(tmp2);
a7812ae4
PB
8602 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8603 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8604 tmp = tcg_temp_new_i32();
ecc7b3aa 8605 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8606 tcg_temp_free_i64(tmp64);
9ee6e8bb 8607 if ((sh & 2) == 0) {
5e3f878a 8608 tmp2 = load_reg(s, rn);
9ef39277 8609 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8610 tcg_temp_free_i32(tmp2);
9ee6e8bb 8611 }
5e3f878a 8612 store_reg(s, rd, tmp);
9ee6e8bb
PB
8613 } else {
8614 /* 16 * 16 */
5e3f878a
PB
8615 tmp = load_reg(s, rm);
8616 tmp2 = load_reg(s, rs);
8617 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8618 tcg_temp_free_i32(tmp2);
9ee6e8bb 8619 if (op1 == 2) {
a7812ae4
PB
8620 tmp64 = tcg_temp_new_i64();
8621 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8622 tcg_temp_free_i32(tmp);
a7812ae4
PB
8623 gen_addq(s, tmp64, rn, rd);
8624 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8625 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8626 } else {
8627 if (op1 == 0) {
5e3f878a 8628 tmp2 = load_reg(s, rn);
9ef39277 8629 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8630 tcg_temp_free_i32(tmp2);
9ee6e8bb 8631 }
5e3f878a 8632 store_reg(s, rd, tmp);
9ee6e8bb
PB
8633 }
8634 }
8635 break;
8636 default:
8637 goto illegal_op;
8638 }
8639 } else if (((insn & 0x0e000000) == 0 &&
8640 (insn & 0x00000090) != 0x90) ||
8641 ((insn & 0x0e000000) == (1 << 25))) {
8642 int set_cc, logic_cc, shiftop;
8643
8644 op1 = (insn >> 21) & 0xf;
8645 set_cc = (insn >> 20) & 1;
8646 logic_cc = table_logic_cc[op1] & set_cc;
8647
8648 /* data processing instruction */
8649 if (insn & (1 << 25)) {
8650 /* immediate operand */
8651 val = insn & 0xff;
8652 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8653 if (shift) {
9ee6e8bb 8654 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8655 }
7d1b0095 8656 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8657 tcg_gen_movi_i32(tmp2, val);
8658 if (logic_cc && shift) {
8659 gen_set_CF_bit31(tmp2);
8660 }
9ee6e8bb
PB
8661 } else {
8662 /* register */
8663 rm = (insn) & 0xf;
e9bb4aa9 8664 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8665 shiftop = (insn >> 5) & 3;
8666 if (!(insn & (1 << 4))) {
8667 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8668 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8669 } else {
8670 rs = (insn >> 8) & 0xf;
8984bd2e 8671 tmp = load_reg(s, rs);
e9bb4aa9 8672 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8673 }
8674 }
8675 if (op1 != 0x0f && op1 != 0x0d) {
8676 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8677 tmp = load_reg(s, rn);
8678 } else {
f764718d 8679 tmp = NULL;
9ee6e8bb
PB
8680 }
8681 rd = (insn >> 12) & 0xf;
8682 switch(op1) {
8683 case 0x00:
e9bb4aa9
JR
8684 tcg_gen_and_i32(tmp, tmp, tmp2);
8685 if (logic_cc) {
8686 gen_logic_CC(tmp);
8687 }
7dcc1f89 8688 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8689 break;
8690 case 0x01:
e9bb4aa9
JR
8691 tcg_gen_xor_i32(tmp, tmp, tmp2);
8692 if (logic_cc) {
8693 gen_logic_CC(tmp);
8694 }
7dcc1f89 8695 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8696 break;
8697 case 0x02:
8698 if (set_cc && rd == 15) {
8699 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8700 if (IS_USER(s)) {
9ee6e8bb 8701 goto illegal_op;
e9bb4aa9 8702 }
72485ec4 8703 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8704 gen_exception_return(s, tmp);
9ee6e8bb 8705 } else {
e9bb4aa9 8706 if (set_cc) {
72485ec4 8707 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8708 } else {
8709 tcg_gen_sub_i32(tmp, tmp, tmp2);
8710 }
7dcc1f89 8711 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8712 }
8713 break;
8714 case 0x03:
e9bb4aa9 8715 if (set_cc) {
72485ec4 8716 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8717 } else {
8718 tcg_gen_sub_i32(tmp, tmp2, tmp);
8719 }
7dcc1f89 8720 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8721 break;
8722 case 0x04:
e9bb4aa9 8723 if (set_cc) {
72485ec4 8724 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8725 } else {
8726 tcg_gen_add_i32(tmp, tmp, tmp2);
8727 }
7dcc1f89 8728 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8729 break;
8730 case 0x05:
e9bb4aa9 8731 if (set_cc) {
49b4c31e 8732 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8733 } else {
8734 gen_add_carry(tmp, tmp, tmp2);
8735 }
7dcc1f89 8736 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8737 break;
8738 case 0x06:
e9bb4aa9 8739 if (set_cc) {
2de68a49 8740 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8741 } else {
8742 gen_sub_carry(tmp, tmp, tmp2);
8743 }
7dcc1f89 8744 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8745 break;
8746 case 0x07:
e9bb4aa9 8747 if (set_cc) {
2de68a49 8748 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8749 } else {
8750 gen_sub_carry(tmp, tmp2, tmp);
8751 }
7dcc1f89 8752 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8753 break;
8754 case 0x08:
8755 if (set_cc) {
e9bb4aa9
JR
8756 tcg_gen_and_i32(tmp, tmp, tmp2);
8757 gen_logic_CC(tmp);
9ee6e8bb 8758 }
7d1b0095 8759 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8760 break;
8761 case 0x09:
8762 if (set_cc) {
e9bb4aa9
JR
8763 tcg_gen_xor_i32(tmp, tmp, tmp2);
8764 gen_logic_CC(tmp);
9ee6e8bb 8765 }
7d1b0095 8766 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8767 break;
8768 case 0x0a:
8769 if (set_cc) {
72485ec4 8770 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8771 }
7d1b0095 8772 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8773 break;
8774 case 0x0b:
8775 if (set_cc) {
72485ec4 8776 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8777 }
7d1b0095 8778 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8779 break;
8780 case 0x0c:
e9bb4aa9
JR
8781 tcg_gen_or_i32(tmp, tmp, tmp2);
8782 if (logic_cc) {
8783 gen_logic_CC(tmp);
8784 }
7dcc1f89 8785 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8786 break;
8787 case 0x0d:
8788 if (logic_cc && rd == 15) {
8789 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8790 if (IS_USER(s)) {
9ee6e8bb 8791 goto illegal_op;
e9bb4aa9
JR
8792 }
8793 gen_exception_return(s, tmp2);
9ee6e8bb 8794 } else {
e9bb4aa9
JR
8795 if (logic_cc) {
8796 gen_logic_CC(tmp2);
8797 }
7dcc1f89 8798 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8799 }
8800 break;
8801 case 0x0e:
f669df27 8802 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8803 if (logic_cc) {
8804 gen_logic_CC(tmp);
8805 }
7dcc1f89 8806 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8807 break;
8808 default:
8809 case 0x0f:
e9bb4aa9
JR
8810 tcg_gen_not_i32(tmp2, tmp2);
8811 if (logic_cc) {
8812 gen_logic_CC(tmp2);
8813 }
7dcc1f89 8814 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8815 break;
8816 }
e9bb4aa9 8817 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8818 tcg_temp_free_i32(tmp2);
e9bb4aa9 8819 }
9ee6e8bb
PB
8820 } else {
8821 /* other instructions */
8822 op1 = (insn >> 24) & 0xf;
8823 switch(op1) {
8824 case 0x0:
8825 case 0x1:
8826 /* multiplies, extra load/stores */
8827 sh = (insn >> 5) & 3;
8828 if (sh == 0) {
8829 if (op1 == 0x0) {
8830 rd = (insn >> 16) & 0xf;
8831 rn = (insn >> 12) & 0xf;
8832 rs = (insn >> 8) & 0xf;
8833 rm = (insn) & 0xf;
8834 op1 = (insn >> 20) & 0xf;
8835 switch (op1) {
8836 case 0: case 1: case 2: case 3: case 6:
8837 /* 32 bit mul */
5e3f878a
PB
8838 tmp = load_reg(s, rs);
8839 tmp2 = load_reg(s, rm);
8840 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8841 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8842 if (insn & (1 << 22)) {
8843 /* Subtract (mls) */
8844 ARCH(6T2);
5e3f878a
PB
8845 tmp2 = load_reg(s, rn);
8846 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8847 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8848 } else if (insn & (1 << 21)) {
8849 /* Add */
5e3f878a
PB
8850 tmp2 = load_reg(s, rn);
8851 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8852 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8853 }
8854 if (insn & (1 << 20))
5e3f878a
PB
8855 gen_logic_CC(tmp);
8856 store_reg(s, rd, tmp);
9ee6e8bb 8857 break;
8aac08b1
AJ
8858 case 4:
8859 /* 64 bit mul double accumulate (UMAAL) */
8860 ARCH(6);
8861 tmp = load_reg(s, rs);
8862 tmp2 = load_reg(s, rm);
8863 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8864 gen_addq_lo(s, tmp64, rn);
8865 gen_addq_lo(s, tmp64, rd);
8866 gen_storeq_reg(s, rn, rd, tmp64);
8867 tcg_temp_free_i64(tmp64);
8868 break;
8869 case 8: case 9: case 10: case 11:
8870 case 12: case 13: case 14: case 15:
8871 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8872 tmp = load_reg(s, rs);
8873 tmp2 = load_reg(s, rm);
8aac08b1 8874 if (insn & (1 << 22)) {
c9f10124 8875 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8876 } else {
c9f10124 8877 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8878 }
8879 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8880 TCGv_i32 al = load_reg(s, rn);
8881 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8882 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8883 tcg_temp_free_i32(al);
8884 tcg_temp_free_i32(ah);
9ee6e8bb 8885 }
8aac08b1 8886 if (insn & (1 << 20)) {
c9f10124 8887 gen_logicq_cc(tmp, tmp2);
8aac08b1 8888 }
c9f10124
RH
8889 store_reg(s, rn, tmp);
8890 store_reg(s, rd, tmp2);
9ee6e8bb 8891 break;
8aac08b1
AJ
8892 default:
8893 goto illegal_op;
9ee6e8bb
PB
8894 }
8895 } else {
8896 rn = (insn >> 16) & 0xf;
8897 rd = (insn >> 12) & 0xf;
8898 if (insn & (1 << 23)) {
8899 /* load/store exclusive */
2359bf80 8900 int op2 = (insn >> 8) & 3;
86753403 8901 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8902
8903 switch (op2) {
8904 case 0: /* lda/stl */
8905 if (op1 == 1) {
8906 goto illegal_op;
8907 }
8908 ARCH(8);
8909 break;
8910 case 1: /* reserved */
8911 goto illegal_op;
8912 case 2: /* ldaex/stlex */
8913 ARCH(8);
8914 break;
8915 case 3: /* ldrex/strex */
8916 if (op1) {
8917 ARCH(6K);
8918 } else {
8919 ARCH(6);
8920 }
8921 break;
8922 }
8923
3174f8e9 8924 addr = tcg_temp_local_new_i32();
98a46317 8925 load_reg_var(s, addr, rn);
2359bf80
MR
8926
8927 /* Since the emulation does not have barriers,
8928 the acquire/release semantics need no special
8929 handling */
8930 if (op2 == 0) {
8931 if (insn & (1 << 20)) {
8932 tmp = tcg_temp_new_i32();
8933 switch (op1) {
8934 case 0: /* lda */
9bb6558a
PM
8935 gen_aa32_ld32u_iss(s, tmp, addr,
8936 get_mem_index(s),
8937 rd | ISSIsAcqRel);
2359bf80
MR
8938 break;
8939 case 2: /* ldab */
9bb6558a
PM
8940 gen_aa32_ld8u_iss(s, tmp, addr,
8941 get_mem_index(s),
8942 rd | ISSIsAcqRel);
2359bf80
MR
8943 break;
8944 case 3: /* ldah */
9bb6558a
PM
8945 gen_aa32_ld16u_iss(s, tmp, addr,
8946 get_mem_index(s),
8947 rd | ISSIsAcqRel);
2359bf80
MR
8948 break;
8949 default:
8950 abort();
8951 }
8952 store_reg(s, rd, tmp);
8953 } else {
8954 rm = insn & 0xf;
8955 tmp = load_reg(s, rm);
8956 switch (op1) {
8957 case 0: /* stl */
9bb6558a
PM
8958 gen_aa32_st32_iss(s, tmp, addr,
8959 get_mem_index(s),
8960 rm | ISSIsAcqRel);
2359bf80
MR
8961 break;
8962 case 2: /* stlb */
9bb6558a
PM
8963 gen_aa32_st8_iss(s, tmp, addr,
8964 get_mem_index(s),
8965 rm | ISSIsAcqRel);
2359bf80
MR
8966 break;
8967 case 3: /* stlh */
9bb6558a
PM
8968 gen_aa32_st16_iss(s, tmp, addr,
8969 get_mem_index(s),
8970 rm | ISSIsAcqRel);
2359bf80
MR
8971 break;
8972 default:
8973 abort();
8974 }
8975 tcg_temp_free_i32(tmp);
8976 }
8977 } else if (insn & (1 << 20)) {
86753403
PB
8978 switch (op1) {
8979 case 0: /* ldrex */
426f5abc 8980 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8981 break;
8982 case 1: /* ldrexd */
426f5abc 8983 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8984 break;
8985 case 2: /* ldrexb */
426f5abc 8986 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8987 break;
8988 case 3: /* ldrexh */
426f5abc 8989 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8990 break;
8991 default:
8992 abort();
8993 }
9ee6e8bb
PB
8994 } else {
8995 rm = insn & 0xf;
86753403
PB
8996 switch (op1) {
8997 case 0: /* strex */
426f5abc 8998 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8999 break;
9000 case 1: /* strexd */
502e64fe 9001 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
9002 break;
9003 case 2: /* strexb */
426f5abc 9004 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
9005 break;
9006 case 3: /* strexh */
426f5abc 9007 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
9008 break;
9009 default:
9010 abort();
9011 }
9ee6e8bb 9012 }
39d5492a 9013 tcg_temp_free_i32(addr);
9ee6e8bb 9014 } else {
cf12bce0
EC
9015 TCGv taddr;
9016 TCGMemOp opc = s->be_data;
9017
9ee6e8bb
PB
9018 /* SWP instruction */
9019 rm = (insn) & 0xf;
9020
9ee6e8bb 9021 if (insn & (1 << 22)) {
cf12bce0 9022 opc |= MO_UB;
9ee6e8bb 9023 } else {
cf12bce0 9024 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 9025 }
cf12bce0
EC
9026
9027 addr = load_reg(s, rn);
9028 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 9029 tcg_temp_free_i32(addr);
cf12bce0
EC
9030
9031 tmp = load_reg(s, rm);
9032 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
9033 get_mem_index(s), opc);
9034 tcg_temp_free(taddr);
9035 store_reg(s, rd, tmp);
9ee6e8bb
PB
9036 }
9037 }
9038 } else {
9039 int address_offset;
3960c336 9040 bool load = insn & (1 << 20);
63f26fcf
PM
9041 bool wbit = insn & (1 << 21);
9042 bool pbit = insn & (1 << 24);
3960c336 9043 bool doubleword = false;
9bb6558a
PM
9044 ISSInfo issinfo;
9045
9ee6e8bb
PB
9046 /* Misc load/store */
9047 rn = (insn >> 16) & 0xf;
9048 rd = (insn >> 12) & 0xf;
3960c336 9049
9bb6558a
PM
9050 /* ISS not valid if writeback */
9051 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
9052
3960c336
PM
9053 if (!load && (sh & 2)) {
9054 /* doubleword */
9055 ARCH(5TE);
9056 if (rd & 1) {
9057 /* UNPREDICTABLE; we choose to UNDEF */
9058 goto illegal_op;
9059 }
9060 load = (sh & 1) == 0;
9061 doubleword = true;
9062 }
9063
b0109805 9064 addr = load_reg(s, rn);
63f26fcf 9065 if (pbit) {
b0109805 9066 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 9067 }
9ee6e8bb 9068 address_offset = 0;
3960c336
PM
9069
9070 if (doubleword) {
9071 if (!load) {
9ee6e8bb 9072 /* store */
b0109805 9073 tmp = load_reg(s, rd);
12dcc321 9074 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9075 tcg_temp_free_i32(tmp);
b0109805
PB
9076 tcg_gen_addi_i32(addr, addr, 4);
9077 tmp = load_reg(s, rd + 1);
12dcc321 9078 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9079 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9080 } else {
9081 /* load */
5a839c0d 9082 tmp = tcg_temp_new_i32();
12dcc321 9083 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9084 store_reg(s, rd, tmp);
9085 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 9086 tmp = tcg_temp_new_i32();
12dcc321 9087 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9088 rd++;
9ee6e8bb
PB
9089 }
9090 address_offset = -4;
3960c336
PM
9091 } else if (load) {
9092 /* load */
9093 tmp = tcg_temp_new_i32();
9094 switch (sh) {
9095 case 1:
9bb6558a
PM
9096 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9097 issinfo);
3960c336
PM
9098 break;
9099 case 2:
9bb6558a
PM
9100 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9101 issinfo);
3960c336
PM
9102 break;
9103 default:
9104 case 3:
9bb6558a
PM
9105 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9106 issinfo);
3960c336
PM
9107 break;
9108 }
9ee6e8bb
PB
9109 } else {
9110 /* store */
b0109805 9111 tmp = load_reg(s, rd);
9bb6558a 9112 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9113 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9114 }
9115 /* Perform base writeback before the loaded value to
9116 ensure correct behavior with overlapping index registers.
b6af0975 9117 ldrd with base writeback is undefined if the
9ee6e8bb 9118 destination and index registers overlap. */
63f26fcf 9119 if (!pbit) {
b0109805
PB
9120 gen_add_datah_offset(s, insn, address_offset, addr);
9121 store_reg(s, rn, addr);
63f26fcf 9122 } else if (wbit) {
9ee6e8bb 9123 if (address_offset)
b0109805
PB
9124 tcg_gen_addi_i32(addr, addr, address_offset);
9125 store_reg(s, rn, addr);
9126 } else {
7d1b0095 9127 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9128 }
9129 if (load) {
9130 /* Complete the load. */
b0109805 9131 store_reg(s, rd, tmp);
9ee6e8bb
PB
9132 }
9133 }
9134 break;
9135 case 0x4:
9136 case 0x5:
9137 goto do_ldst;
9138 case 0x6:
9139 case 0x7:
9140 if (insn & (1 << 4)) {
9141 ARCH(6);
9142 /* Armv6 Media instructions. */
9143 rm = insn & 0xf;
9144 rn = (insn >> 16) & 0xf;
2c0262af 9145 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9146 rs = (insn >> 8) & 0xf;
9147 switch ((insn >> 23) & 3) {
9148 case 0: /* Parallel add/subtract. */
9149 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9150 tmp = load_reg(s, rn);
9151 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9152 sh = (insn >> 5) & 7;
9153 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9154 goto illegal_op;
6ddbc6e4 9155 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9156 tcg_temp_free_i32(tmp2);
6ddbc6e4 9157 store_reg(s, rd, tmp);
9ee6e8bb
PB
9158 break;
9159 case 1:
9160 if ((insn & 0x00700020) == 0) {
6c95676b 9161 /* Halfword pack. */
3670669c
PB
9162 tmp = load_reg(s, rn);
9163 tmp2 = load_reg(s, rm);
9ee6e8bb 9164 shift = (insn >> 7) & 0x1f;
3670669c
PB
9165 if (insn & (1 << 6)) {
9166 /* pkhtb */
22478e79
AZ
9167 if (shift == 0)
9168 shift = 31;
9169 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9170 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9171 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9172 } else {
9173 /* pkhbt */
22478e79
AZ
9174 if (shift)
9175 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9176 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9177 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9178 }
9179 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9180 tcg_temp_free_i32(tmp2);
3670669c 9181 store_reg(s, rd, tmp);
9ee6e8bb
PB
9182 } else if ((insn & 0x00200020) == 0x00200000) {
9183 /* [us]sat */
6ddbc6e4 9184 tmp = load_reg(s, rm);
9ee6e8bb
PB
9185 shift = (insn >> 7) & 0x1f;
9186 if (insn & (1 << 6)) {
9187 if (shift == 0)
9188 shift = 31;
6ddbc6e4 9189 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9190 } else {
6ddbc6e4 9191 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9192 }
9193 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9194 tmp2 = tcg_const_i32(sh);
9195 if (insn & (1 << 22))
9ef39277 9196 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9197 else
9ef39277 9198 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9199 tcg_temp_free_i32(tmp2);
6ddbc6e4 9200 store_reg(s, rd, tmp);
9ee6e8bb
PB
9201 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9202 /* [us]sat16 */
6ddbc6e4 9203 tmp = load_reg(s, rm);
9ee6e8bb 9204 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9205 tmp2 = tcg_const_i32(sh);
9206 if (insn & (1 << 22))
9ef39277 9207 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9208 else
9ef39277 9209 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9210 tcg_temp_free_i32(tmp2);
6ddbc6e4 9211 store_reg(s, rd, tmp);
9ee6e8bb
PB
9212 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9213 /* Select bytes. */
6ddbc6e4
PB
9214 tmp = load_reg(s, rn);
9215 tmp2 = load_reg(s, rm);
7d1b0095 9216 tmp3 = tcg_temp_new_i32();
0ecb72a5 9217 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9218 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9219 tcg_temp_free_i32(tmp3);
9220 tcg_temp_free_i32(tmp2);
6ddbc6e4 9221 store_reg(s, rd, tmp);
9ee6e8bb 9222 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9223 tmp = load_reg(s, rm);
9ee6e8bb 9224 shift = (insn >> 10) & 3;
1301f322 9225 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9226 rotate, a shift is sufficient. */
9227 if (shift != 0)
f669df27 9228 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9229 op1 = (insn >> 20) & 7;
9230 switch (op1) {
5e3f878a
PB
9231 case 0: gen_sxtb16(tmp); break;
9232 case 2: gen_sxtb(tmp); break;
9233 case 3: gen_sxth(tmp); break;
9234 case 4: gen_uxtb16(tmp); break;
9235 case 6: gen_uxtb(tmp); break;
9236 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9237 default: goto illegal_op;
9238 }
9239 if (rn != 15) {
5e3f878a 9240 tmp2 = load_reg(s, rn);
9ee6e8bb 9241 if ((op1 & 3) == 0) {
5e3f878a 9242 gen_add16(tmp, tmp2);
9ee6e8bb 9243 } else {
5e3f878a 9244 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9245 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9246 }
9247 }
6c95676b 9248 store_reg(s, rd, tmp);
9ee6e8bb
PB
9249 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9250 /* rev */
b0109805 9251 tmp = load_reg(s, rm);
9ee6e8bb
PB
9252 if (insn & (1 << 22)) {
9253 if (insn & (1 << 7)) {
b0109805 9254 gen_revsh(tmp);
9ee6e8bb
PB
9255 } else {
9256 ARCH(6T2);
b0109805 9257 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9258 }
9259 } else {
9260 if (insn & (1 << 7))
b0109805 9261 gen_rev16(tmp);
9ee6e8bb 9262 else
66896cb8 9263 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9264 }
b0109805 9265 store_reg(s, rd, tmp);
9ee6e8bb
PB
9266 } else {
9267 goto illegal_op;
9268 }
9269 break;
9270 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9271 switch ((insn >> 20) & 0x7) {
9272 case 5:
9273 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9274 /* op2 not 00x or 11x : UNDEF */
9275 goto illegal_op;
9276 }
838fa72d
AJ
9277 /* Signed multiply most significant [accumulate].
9278 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9279 tmp = load_reg(s, rm);
9280 tmp2 = load_reg(s, rs);
a7812ae4 9281 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9282
955a7dd5 9283 if (rd != 15) {
838fa72d 9284 tmp = load_reg(s, rd);
9ee6e8bb 9285 if (insn & (1 << 6)) {
838fa72d 9286 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9287 } else {
838fa72d 9288 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9289 }
9290 }
838fa72d
AJ
9291 if (insn & (1 << 5)) {
9292 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9293 }
9294 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9295 tmp = tcg_temp_new_i32();
ecc7b3aa 9296 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9297 tcg_temp_free_i64(tmp64);
955a7dd5 9298 store_reg(s, rn, tmp);
41e9564d
PM
9299 break;
9300 case 0:
9301 case 4:
9302 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9303 if (insn & (1 << 7)) {
9304 goto illegal_op;
9305 }
9306 tmp = load_reg(s, rm);
9307 tmp2 = load_reg(s, rs);
9ee6e8bb 9308 if (insn & (1 << 5))
5e3f878a
PB
9309 gen_swap_half(tmp2);
9310 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9311 if (insn & (1 << 22)) {
5e3f878a 9312 /* smlald, smlsld */
33bbd75a
PC
9313 TCGv_i64 tmp64_2;
9314
a7812ae4 9315 tmp64 = tcg_temp_new_i64();
33bbd75a 9316 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9317 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9318 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9319 tcg_temp_free_i32(tmp);
33bbd75a
PC
9320 tcg_temp_free_i32(tmp2);
9321 if (insn & (1 << 6)) {
9322 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9323 } else {
9324 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9325 }
9326 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9327 gen_addq(s, tmp64, rd, rn);
9328 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9329 tcg_temp_free_i64(tmp64);
9ee6e8bb 9330 } else {
5e3f878a 9331 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9332 if (insn & (1 << 6)) {
9333 /* This subtraction cannot overflow. */
9334 tcg_gen_sub_i32(tmp, tmp, tmp2);
9335 } else {
9336 /* This addition cannot overflow 32 bits;
9337 * however it may overflow considered as a
9338 * signed operation, in which case we must set
9339 * the Q flag.
9340 */
9341 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9342 }
9343 tcg_temp_free_i32(tmp2);
22478e79 9344 if (rd != 15)
9ee6e8bb 9345 {
22478e79 9346 tmp2 = load_reg(s, rd);
9ef39277 9347 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9348 tcg_temp_free_i32(tmp2);
9ee6e8bb 9349 }
22478e79 9350 store_reg(s, rn, tmp);
9ee6e8bb 9351 }
41e9564d 9352 break;
b8b8ea05
PM
9353 case 1:
9354 case 3:
9355 /* SDIV, UDIV */
d614a513 9356 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9357 goto illegal_op;
9358 }
9359 if (((insn >> 5) & 7) || (rd != 15)) {
9360 goto illegal_op;
9361 }
9362 tmp = load_reg(s, rm);
9363 tmp2 = load_reg(s, rs);
9364 if (insn & (1 << 21)) {
9365 gen_helper_udiv(tmp, tmp, tmp2);
9366 } else {
9367 gen_helper_sdiv(tmp, tmp, tmp2);
9368 }
9369 tcg_temp_free_i32(tmp2);
9370 store_reg(s, rn, tmp);
9371 break;
41e9564d
PM
9372 default:
9373 goto illegal_op;
9ee6e8bb
PB
9374 }
9375 break;
9376 case 3:
9377 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9378 switch (op1) {
9379 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9380 ARCH(6);
9381 tmp = load_reg(s, rm);
9382 tmp2 = load_reg(s, rs);
9383 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9384 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9385 if (rd != 15) {
9386 tmp2 = load_reg(s, rd);
6ddbc6e4 9387 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9388 tcg_temp_free_i32(tmp2);
9ee6e8bb 9389 }
ded9d295 9390 store_reg(s, rn, tmp);
9ee6e8bb
PB
9391 break;
9392 case 0x20: case 0x24: case 0x28: case 0x2c:
9393 /* Bitfield insert/clear. */
9394 ARCH(6T2);
9395 shift = (insn >> 7) & 0x1f;
9396 i = (insn >> 16) & 0x1f;
45140a57
KB
9397 if (i < shift) {
9398 /* UNPREDICTABLE; we choose to UNDEF */
9399 goto illegal_op;
9400 }
9ee6e8bb
PB
9401 i = i + 1 - shift;
9402 if (rm == 15) {
7d1b0095 9403 tmp = tcg_temp_new_i32();
5e3f878a 9404 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9405 } else {
5e3f878a 9406 tmp = load_reg(s, rm);
9ee6e8bb
PB
9407 }
9408 if (i != 32) {
5e3f878a 9409 tmp2 = load_reg(s, rd);
d593c48e 9410 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9411 tcg_temp_free_i32(tmp2);
9ee6e8bb 9412 }
5e3f878a 9413 store_reg(s, rd, tmp);
9ee6e8bb
PB
9414 break;
9415 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9416 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9417 ARCH(6T2);
5e3f878a 9418 tmp = load_reg(s, rm);
9ee6e8bb
PB
9419 shift = (insn >> 7) & 0x1f;
9420 i = ((insn >> 16) & 0x1f) + 1;
9421 if (shift + i > 32)
9422 goto illegal_op;
9423 if (i < 32) {
9424 if (op1 & 0x20) {
59a71b4c 9425 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9426 } else {
59a71b4c 9427 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9428 }
9429 }
5e3f878a 9430 store_reg(s, rd, tmp);
9ee6e8bb
PB
9431 break;
9432 default:
9433 goto illegal_op;
9434 }
9435 break;
9436 }
9437 break;
9438 }
9439 do_ldst:
9440 /* Check for undefined extension instructions
9441 * per the ARM Bible IE:
9442 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9443 */
9444 sh = (0xf << 20) | (0xf << 4);
9445 if (op1 == 0x7 && ((insn & sh) == sh))
9446 {
9447 goto illegal_op;
9448 }
9449 /* load/store byte/word */
9450 rn = (insn >> 16) & 0xf;
9451 rd = (insn >> 12) & 0xf;
b0109805 9452 tmp2 = load_reg(s, rn);
a99caa48
PM
9453 if ((insn & 0x01200000) == 0x00200000) {
9454 /* ldrt/strt */
579d21cc 9455 i = get_a32_user_mem_index(s);
a99caa48
PM
9456 } else {
9457 i = get_mem_index(s);
9458 }
9ee6e8bb 9459 if (insn & (1 << 24))
b0109805 9460 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9461 if (insn & (1 << 20)) {
9462 /* load */
5a839c0d 9463 tmp = tcg_temp_new_i32();
9ee6e8bb 9464 if (insn & (1 << 22)) {
9bb6558a 9465 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9466 } else {
9bb6558a 9467 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9468 }
9ee6e8bb
PB
9469 } else {
9470 /* store */
b0109805 9471 tmp = load_reg(s, rd);
5a839c0d 9472 if (insn & (1 << 22)) {
9bb6558a 9473 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9474 } else {
9bb6558a 9475 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9476 }
9477 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9478 }
9479 if (!(insn & (1 << 24))) {
b0109805
PB
9480 gen_add_data_offset(s, insn, tmp2);
9481 store_reg(s, rn, tmp2);
9482 } else if (insn & (1 << 21)) {
9483 store_reg(s, rn, tmp2);
9484 } else {
7d1b0095 9485 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9486 }
9487 if (insn & (1 << 20)) {
9488 /* Complete the load. */
7dcc1f89 9489 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9490 }
9491 break;
9492 case 0x08:
9493 case 0x09:
9494 {
da3e53dd
PM
9495 int j, n, loaded_base;
9496 bool exc_return = false;
9497 bool is_load = extract32(insn, 20, 1);
9498 bool user = false;
39d5492a 9499 TCGv_i32 loaded_var;
9ee6e8bb
PB
9500 /* load/store multiple words */
9501 /* XXX: store correct base if write back */
9ee6e8bb 9502 if (insn & (1 << 22)) {
da3e53dd 9503 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9504 if (IS_USER(s))
9505 goto illegal_op; /* only usable in supervisor mode */
9506
da3e53dd
PM
9507 if (is_load && extract32(insn, 15, 1)) {
9508 exc_return = true;
9509 } else {
9510 user = true;
9511 }
9ee6e8bb
PB
9512 }
9513 rn = (insn >> 16) & 0xf;
b0109805 9514 addr = load_reg(s, rn);
9ee6e8bb
PB
9515
9516 /* compute total size */
9517 loaded_base = 0;
f764718d 9518 loaded_var = NULL;
9ee6e8bb
PB
9519 n = 0;
9520 for(i=0;i<16;i++) {
9521 if (insn & (1 << i))
9522 n++;
9523 }
9524 /* XXX: test invalid n == 0 case ? */
9525 if (insn & (1 << 23)) {
9526 if (insn & (1 << 24)) {
9527 /* pre increment */
b0109805 9528 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9529 } else {
9530 /* post increment */
9531 }
9532 } else {
9533 if (insn & (1 << 24)) {
9534 /* pre decrement */
b0109805 9535 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9536 } else {
9537 /* post decrement */
9538 if (n != 1)
b0109805 9539 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9540 }
9541 }
9542 j = 0;
9543 for(i=0;i<16;i++) {
9544 if (insn & (1 << i)) {
da3e53dd 9545 if (is_load) {
9ee6e8bb 9546 /* load */
5a839c0d 9547 tmp = tcg_temp_new_i32();
12dcc321 9548 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9549 if (user) {
b75263d6 9550 tmp2 = tcg_const_i32(i);
1ce94f81 9551 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9552 tcg_temp_free_i32(tmp2);
7d1b0095 9553 tcg_temp_free_i32(tmp);
9ee6e8bb 9554 } else if (i == rn) {
b0109805 9555 loaded_var = tmp;
9ee6e8bb 9556 loaded_base = 1;
fb0e8e79
PM
9557 } else if (rn == 15 && exc_return) {
9558 store_pc_exc_ret(s, tmp);
9ee6e8bb 9559 } else {
7dcc1f89 9560 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9561 }
9562 } else {
9563 /* store */
9564 if (i == 15) {
9565 /* special case: r15 = PC + 8 */
9566 val = (long)s->pc + 4;
7d1b0095 9567 tmp = tcg_temp_new_i32();
b0109805 9568 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9569 } else if (user) {
7d1b0095 9570 tmp = tcg_temp_new_i32();
b75263d6 9571 tmp2 = tcg_const_i32(i);
9ef39277 9572 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9573 tcg_temp_free_i32(tmp2);
9ee6e8bb 9574 } else {
b0109805 9575 tmp = load_reg(s, i);
9ee6e8bb 9576 }
12dcc321 9577 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9578 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9579 }
9580 j++;
9581 /* no need to add after the last transfer */
9582 if (j != n)
b0109805 9583 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9584 }
9585 }
9586 if (insn & (1 << 21)) {
9587 /* write back */
9588 if (insn & (1 << 23)) {
9589 if (insn & (1 << 24)) {
9590 /* pre increment */
9591 } else {
9592 /* post increment */
b0109805 9593 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9594 }
9595 } else {
9596 if (insn & (1 << 24)) {
9597 /* pre decrement */
9598 if (n != 1)
b0109805 9599 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9600 } else {
9601 /* post decrement */
b0109805 9602 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9603 }
9604 }
b0109805
PB
9605 store_reg(s, rn, addr);
9606 } else {
7d1b0095 9607 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9608 }
9609 if (loaded_base) {
b0109805 9610 store_reg(s, rn, loaded_var);
9ee6e8bb 9611 }
da3e53dd 9612 if (exc_return) {
9ee6e8bb 9613 /* Restore CPSR from SPSR. */
d9ba4830 9614 tmp = load_cpu_field(spsr);
235ea1f5 9615 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9616 tcg_temp_free_i32(tmp);
b29fd33d 9617 /* Must exit loop to check un-masked IRQs */
dcba3a8d 9618 s->base.is_jmp = DISAS_EXIT;
9ee6e8bb
PB
9619 }
9620 }
9621 break;
9622 case 0xa:
9623 case 0xb:
9624 {
9625 int32_t offset;
9626
9627 /* branch (and link) */
9628 val = (int32_t)s->pc;
9629 if (insn & (1 << 24)) {
7d1b0095 9630 tmp = tcg_temp_new_i32();
5e3f878a
PB
9631 tcg_gen_movi_i32(tmp, val);
9632 store_reg(s, 14, tmp);
9ee6e8bb 9633 }
534df156
PM
9634 offset = sextract32(insn << 2, 0, 26);
9635 val += offset + 4;
9ee6e8bb
PB
9636 gen_jmp(s, val);
9637 }
9638 break;
9639 case 0xc:
9640 case 0xd:
9641 case 0xe:
6a57f3eb
WN
9642 if (((insn >> 8) & 0xe) == 10) {
9643 /* VFP. */
7dcc1f89 9644 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9645 goto illegal_op;
9646 }
7dcc1f89 9647 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9648 /* Coprocessor. */
9ee6e8bb 9649 goto illegal_op;
6a57f3eb 9650 }
9ee6e8bb
PB
9651 break;
9652 case 0xf:
9653 /* swi */
eaed129d 9654 gen_set_pc_im(s, s->pc);
d4a2dc67 9655 s->svc_imm = extract32(insn, 0, 24);
dcba3a8d 9656 s->base.is_jmp = DISAS_SWI;
9ee6e8bb
PB
9657 break;
9658 default:
9659 illegal_op:
73710361
GB
9660 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9661 default_exception_el(s));
9ee6e8bb
PB
9662 break;
9663 }
9664 }
9665}
9666
296e5a0a
PM
9667static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
9668{
9669 /* Return true if this is a 16 bit instruction. We must be precise
9670 * about this (matching the decode). We assume that s->pc still
9671 * points to the first 16 bits of the insn.
9672 */
9673 if ((insn >> 11) < 0x1d) {
9674 /* Definitely a 16-bit instruction */
9675 return true;
9676 }
9677
9678 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
9679 * first half of a 32-bit Thumb insn. Thumb-1 cores might
9680 * end up actually treating this as two 16-bit insns, though,
9681 * if it's half of a bl/blx pair that might span a page boundary.
9682 */
9683 if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
9684 /* Thumb2 cores (including all M profile ones) always treat
9685 * 32-bit insns as 32-bit.
9686 */
9687 return false;
9688 }
9689
9690 if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
9691 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
9692 * is not on the next page; we merge this into a 32-bit
9693 * insn.
9694 */
9695 return false;
9696 }
9697 /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
9698 * 0b1111_1xxx_xxxx_xxxx : BL suffix;
9699 * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
9700 * -- handle as single 16 bit insn
9701 */
9702 return true;
9703}
9704
9ee6e8bb
PB
/* Return true if this is a Thumb-2 logical op (opcodes 0..7). */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
9711
9712/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9713 then set condition code flags based on the result of the operation.
9714 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9715 to the high bit of T1.
9716 Returns zero if the opcode is valid. */
9717
9718static int
39d5492a
PM
9719gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9720 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9721{
9722 int logic_cc;
9723
9724 logic_cc = 0;
9725 switch (op) {
9726 case 0: /* and */
396e467c 9727 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9728 logic_cc = conds;
9729 break;
9730 case 1: /* bic */
f669df27 9731 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9732 logic_cc = conds;
9733 break;
9734 case 2: /* orr */
396e467c 9735 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9736 logic_cc = conds;
9737 break;
9738 case 3: /* orn */
29501f1b 9739 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9740 logic_cc = conds;
9741 break;
9742 case 4: /* eor */
396e467c 9743 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9744 logic_cc = conds;
9745 break;
9746 case 8: /* add */
9747 if (conds)
72485ec4 9748 gen_add_CC(t0, t0, t1);
9ee6e8bb 9749 else
396e467c 9750 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9751 break;
9752 case 10: /* adc */
9753 if (conds)
49b4c31e 9754 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9755 else
396e467c 9756 gen_adc(t0, t1);
9ee6e8bb
PB
9757 break;
9758 case 11: /* sbc */
2de68a49
RH
9759 if (conds) {
9760 gen_sbc_CC(t0, t0, t1);
9761 } else {
396e467c 9762 gen_sub_carry(t0, t0, t1);
2de68a49 9763 }
9ee6e8bb
PB
9764 break;
9765 case 13: /* sub */
9766 if (conds)
72485ec4 9767 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9768 else
396e467c 9769 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9770 break;
9771 case 14: /* rsb */
9772 if (conds)
72485ec4 9773 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9774 else
396e467c 9775 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9776 break;
9777 default: /* 5, 6, 7, 9, 12, 15. */
9778 return 1;
9779 }
9780 if (logic_cc) {
396e467c 9781 gen_logic_CC(t0);
9ee6e8bb 9782 if (shifter_out)
396e467c 9783 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9784 }
9785 return 0;
9786}
9787
2eea841c
PM
9788/* Translate a 32-bit thumb instruction. */
9789static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
9ee6e8bb 9790{
296e5a0a 9791 uint32_t imm, shift, offset;
9ee6e8bb 9792 uint32_t rd, rn, rm, rs;
39d5492a
PM
9793 TCGv_i32 tmp;
9794 TCGv_i32 tmp2;
9795 TCGv_i32 tmp3;
9796 TCGv_i32 addr;
a7812ae4 9797 TCGv_i64 tmp64;
9ee6e8bb
PB
9798 int op;
9799 int shiftop;
9800 int conds;
9801 int logic_cc;
9802
296e5a0a
PM
9803 /* The only 32 bit insn that's allowed for Thumb1 is the combined
9804 * BL/BLX prefix and suffix.
9805 */
9ee6e8bb
PB
9806 if ((insn & 0xf800e800) != 0xf000e800) {
9807 ARCH(6T2);
9808 }
9809
9810 rn = (insn >> 16) & 0xf;
9811 rs = (insn >> 12) & 0xf;
9812 rd = (insn >> 8) & 0xf;
9813 rm = insn & 0xf;
9814 switch ((insn >> 25) & 0xf) {
9815 case 0: case 1: case 2: case 3:
9816 /* 16-bit instructions. Should never happen. */
9817 abort();
9818 case 4:
9819 if (insn & (1 << 22)) {
ebfe27c5
PM
9820 /* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
9821 * - load/store doubleword, load/store exclusive, ldacq/strel,
5158de24 9822 * table branch, TT.
ebfe27c5 9823 */
76eff04d
PM
9824 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
9825 arm_dc_feature(s, ARM_FEATURE_V8)) {
9826 /* 0b1110_1001_0111_1111_1110_1001_0111_111
9827 * - SG (v8M only)
9828 * The bulk of the behaviour for this instruction is implemented
9829 * in v7m_handle_execute_nsc(), which deals with the insn when
9830 * it is executed by a CPU in non-secure state from memory
9831 * which is Secure & NonSecure-Callable.
9832 * Here we only need to handle the remaining cases:
9833 * * in NS memory (including the "security extension not
9834 * implemented" case) : NOP
9835 * * in S memory but CPU already secure (clear IT bits)
9836 * We know that the attribute for the memory this insn is
9837 * in must match the current CPU state, because otherwise
9838 * get_phys_addr_pmsav8 would have generated an exception.
9839 */
9840 if (s->v8m_secure) {
9841 /* Like the IT insn, we don't need to generate any code */
9842 s->condexec_cond = 0;
9843 s->condexec_mask = 0;
9844 }
9845 } else if (insn & 0x01200000) {
ebfe27c5
PM
9846 /* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9847 * - load/store dual (post-indexed)
9848 * 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
9849 * - load/store dual (literal and immediate)
9850 * 0b1111_1001_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
9851 * - load/store dual (pre-indexed)
9852 */
9ee6e8bb 9853 if (rn == 15) {
ebfe27c5
PM
9854 if (insn & (1 << 21)) {
9855 /* UNPREDICTABLE */
9856 goto illegal_op;
9857 }
7d1b0095 9858 addr = tcg_temp_new_i32();
b0109805 9859 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9860 } else {
b0109805 9861 addr = load_reg(s, rn);
9ee6e8bb
PB
9862 }
9863 offset = (insn & 0xff) * 4;
9864 if ((insn & (1 << 23)) == 0)
9865 offset = -offset;
9866 if (insn & (1 << 24)) {
b0109805 9867 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9868 offset = 0;
9869 }
9870 if (insn & (1 << 20)) {
9871 /* ldrd */
e2592fad 9872 tmp = tcg_temp_new_i32();
12dcc321 9873 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9874 store_reg(s, rs, tmp);
9875 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9876 tmp = tcg_temp_new_i32();
12dcc321 9877 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9878 store_reg(s, rd, tmp);
9ee6e8bb
PB
9879 } else {
9880 /* strd */
b0109805 9881 tmp = load_reg(s, rs);
12dcc321 9882 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9883 tcg_temp_free_i32(tmp);
b0109805
PB
9884 tcg_gen_addi_i32(addr, addr, 4);
9885 tmp = load_reg(s, rd);
12dcc321 9886 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9887 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9888 }
9889 if (insn & (1 << 21)) {
9890 /* Base writeback. */
b0109805
PB
9891 tcg_gen_addi_i32(addr, addr, offset - 4);
9892 store_reg(s, rn, addr);
9893 } else {
7d1b0095 9894 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9895 }
9896 } else if ((insn & (1 << 23)) == 0) {
ebfe27c5
PM
9897 /* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
9898 * - load/store exclusive word
5158de24 9899 * - TT (v8M only)
ebfe27c5
PM
9900 */
9901 if (rs == 15) {
5158de24
PM
9902 if (!(insn & (1 << 20)) &&
9903 arm_dc_feature(s, ARM_FEATURE_M) &&
9904 arm_dc_feature(s, ARM_FEATURE_V8)) {
9905 /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
9906 * - TT (v8M only)
9907 */
9908 bool alt = insn & (1 << 7);
9909 TCGv_i32 addr, op, ttresp;
9910
9911 if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
9912 /* we UNDEF for these UNPREDICTABLE cases */
9913 goto illegal_op;
9914 }
9915
9916 if (alt && !s->v8m_secure) {
9917 goto illegal_op;
9918 }
9919
9920 addr = load_reg(s, rn);
9921 op = tcg_const_i32(extract32(insn, 6, 2));
9922 ttresp = tcg_temp_new_i32();
9923 gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
9924 tcg_temp_free_i32(addr);
9925 tcg_temp_free_i32(op);
9926 store_reg(s, rd, ttresp);
9927 }
ebfe27c5
PM
9928 goto illegal_op;
9929 }
39d5492a 9930 addr = tcg_temp_local_new_i32();
98a46317 9931 load_reg_var(s, addr, rn);
426f5abc 9932 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9933 if (insn & (1 << 20)) {
426f5abc 9934 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9935 } else {
426f5abc 9936 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9937 }
39d5492a 9938 tcg_temp_free_i32(addr);
2359bf80 9939 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9940 /* Table Branch. */
9941 if (rn == 15) {
7d1b0095 9942 addr = tcg_temp_new_i32();
b0109805 9943 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9944 } else {
b0109805 9945 addr = load_reg(s, rn);
9ee6e8bb 9946 }
b26eefb6 9947 tmp = load_reg(s, rm);
b0109805 9948 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9949 if (insn & (1 << 4)) {
9950 /* tbh */
b0109805 9951 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9952 tcg_temp_free_i32(tmp);
e2592fad 9953 tmp = tcg_temp_new_i32();
12dcc321 9954 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9955 } else { /* tbb */
7d1b0095 9956 tcg_temp_free_i32(tmp);
e2592fad 9957 tmp = tcg_temp_new_i32();
12dcc321 9958 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9959 }
7d1b0095 9960 tcg_temp_free_i32(addr);
b0109805
PB
9961 tcg_gen_shli_i32(tmp, tmp, 1);
9962 tcg_gen_addi_i32(tmp, tmp, s->pc);
9963 store_reg(s, 15, tmp);
9ee6e8bb 9964 } else {
2359bf80 9965 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9966 op = (insn >> 4) & 0x3;
2359bf80
MR
9967 switch (op2) {
9968 case 0:
426f5abc 9969 goto illegal_op;
2359bf80
MR
9970 case 1:
9971 /* Load/store exclusive byte/halfword/doubleword */
9972 if (op == 2) {
9973 goto illegal_op;
9974 }
9975 ARCH(7);
9976 break;
9977 case 2:
9978 /* Load-acquire/store-release */
9979 if (op == 3) {
9980 goto illegal_op;
9981 }
9982 /* Fall through */
9983 case 3:
9984 /* Load-acquire/store-release exclusive */
9985 ARCH(8);
9986 break;
426f5abc 9987 }
39d5492a 9988 addr = tcg_temp_local_new_i32();
98a46317 9989 load_reg_var(s, addr, rn);
2359bf80
MR
9990 if (!(op2 & 1)) {
9991 if (insn & (1 << 20)) {
9992 tmp = tcg_temp_new_i32();
9993 switch (op) {
9994 case 0: /* ldab */
9bb6558a
PM
9995 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9996 rs | ISSIsAcqRel);
2359bf80
MR
9997 break;
9998 case 1: /* ldah */
9bb6558a
PM
9999 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
10000 rs | ISSIsAcqRel);
2359bf80
MR
10001 break;
10002 case 2: /* lda */
9bb6558a
PM
10003 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
10004 rs | ISSIsAcqRel);
2359bf80
MR
10005 break;
10006 default:
10007 abort();
10008 }
10009 store_reg(s, rs, tmp);
10010 } else {
10011 tmp = load_reg(s, rs);
10012 switch (op) {
10013 case 0: /* stlb */
9bb6558a
PM
10014 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
10015 rs | ISSIsAcqRel);
2359bf80
MR
10016 break;
10017 case 1: /* stlh */
9bb6558a
PM
10018 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
10019 rs | ISSIsAcqRel);
2359bf80
MR
10020 break;
10021 case 2: /* stl */
9bb6558a
PM
10022 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
10023 rs | ISSIsAcqRel);
2359bf80
MR
10024 break;
10025 default:
10026 abort();
10027 }
10028 tcg_temp_free_i32(tmp);
10029 }
10030 } else if (insn & (1 << 20)) {
426f5abc 10031 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 10032 } else {
426f5abc 10033 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 10034 }
39d5492a 10035 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10036 }
10037 } else {
10038 /* Load/store multiple, RFE, SRS. */
10039 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 10040 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 10041 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10042 goto illegal_op;
00115976 10043 }
9ee6e8bb
PB
10044 if (insn & (1 << 20)) {
10045 /* rfe */
b0109805
PB
10046 addr = load_reg(s, rn);
10047 if ((insn & (1 << 24)) == 0)
10048 tcg_gen_addi_i32(addr, addr, -8);
10049 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 10050 tmp = tcg_temp_new_i32();
12dcc321 10051 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 10052 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 10053 tmp2 = tcg_temp_new_i32();
12dcc321 10054 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
10055 if (insn & (1 << 21)) {
10056 /* Base writeback. */
b0109805
PB
10057 if (insn & (1 << 24)) {
10058 tcg_gen_addi_i32(addr, addr, 4);
10059 } else {
10060 tcg_gen_addi_i32(addr, addr, -4);
10061 }
10062 store_reg(s, rn, addr);
10063 } else {
7d1b0095 10064 tcg_temp_free_i32(addr);
9ee6e8bb 10065 }
b0109805 10066 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
10067 } else {
10068 /* srs */
81465888
PM
10069 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
10070 insn & (1 << 21));
9ee6e8bb
PB
10071 }
10072 } else {
5856d44e 10073 int i, loaded_base = 0;
39d5492a 10074 TCGv_i32 loaded_var;
9ee6e8bb 10075 /* Load/store multiple. */
b0109805 10076 addr = load_reg(s, rn);
9ee6e8bb
PB
10077 offset = 0;
10078 for (i = 0; i < 16; i++) {
10079 if (insn & (1 << i))
10080 offset += 4;
10081 }
10082 if (insn & (1 << 24)) {
b0109805 10083 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10084 }
10085
f764718d 10086 loaded_var = NULL;
9ee6e8bb
PB
10087 for (i = 0; i < 16; i++) {
10088 if ((insn & (1 << i)) == 0)
10089 continue;
10090 if (insn & (1 << 20)) {
10091 /* Load. */
e2592fad 10092 tmp = tcg_temp_new_i32();
12dcc321 10093 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 10094 if (i == 15) {
3bb8a96f 10095 gen_bx_excret(s, tmp);
5856d44e
YO
10096 } else if (i == rn) {
10097 loaded_var = tmp;
10098 loaded_base = 1;
9ee6e8bb 10099 } else {
b0109805 10100 store_reg(s, i, tmp);
9ee6e8bb
PB
10101 }
10102 } else {
10103 /* Store. */
b0109805 10104 tmp = load_reg(s, i);
12dcc321 10105 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 10106 tcg_temp_free_i32(tmp);
9ee6e8bb 10107 }
b0109805 10108 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 10109 }
5856d44e
YO
10110 if (loaded_base) {
10111 store_reg(s, rn, loaded_var);
10112 }
9ee6e8bb
PB
10113 if (insn & (1 << 21)) {
10114 /* Base register writeback. */
10115 if (insn & (1 << 24)) {
b0109805 10116 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
10117 }
10118 /* Fault if writeback register is in register list. */
10119 if (insn & (1 << rn))
10120 goto illegal_op;
b0109805
PB
10121 store_reg(s, rn, addr);
10122 } else {
7d1b0095 10123 tcg_temp_free_i32(addr);
9ee6e8bb
PB
10124 }
10125 }
10126 }
10127 break;
2af9ab77
JB
10128 case 5:
10129
9ee6e8bb 10130 op = (insn >> 21) & 0xf;
2af9ab77 10131 if (op == 6) {
62b44f05
AR
10132 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10133 goto illegal_op;
10134 }
2af9ab77
JB
10135 /* Halfword pack. */
10136 tmp = load_reg(s, rn);
10137 tmp2 = load_reg(s, rm);
10138 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
10139 if (insn & (1 << 5)) {
10140 /* pkhtb */
10141 if (shift == 0)
10142 shift = 31;
10143 tcg_gen_sari_i32(tmp2, tmp2, shift);
10144 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
10145 tcg_gen_ext16u_i32(tmp2, tmp2);
10146 } else {
10147 /* pkhbt */
10148 if (shift)
10149 tcg_gen_shli_i32(tmp2, tmp2, shift);
10150 tcg_gen_ext16u_i32(tmp, tmp);
10151 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
10152 }
10153 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10154 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10155 store_reg(s, rd, tmp);
10156 } else {
2af9ab77
JB
10157 /* Data processing register constant shift. */
10158 if (rn == 15) {
7d1b0095 10159 tmp = tcg_temp_new_i32();
2af9ab77
JB
10160 tcg_gen_movi_i32(tmp, 0);
10161 } else {
10162 tmp = load_reg(s, rn);
10163 }
10164 tmp2 = load_reg(s, rm);
10165
10166 shiftop = (insn >> 4) & 3;
10167 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10168 conds = (insn & (1 << 20)) != 0;
10169 logic_cc = (conds && thumb2_logic_op(op));
10170 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10171 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10172 goto illegal_op;
7d1b0095 10173 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10174 if (rd != 15) {
10175 store_reg(s, rd, tmp);
10176 } else {
7d1b0095 10177 tcg_temp_free_i32(tmp);
2af9ab77 10178 }
3174f8e9 10179 }
9ee6e8bb
PB
10180 break;
10181 case 13: /* Misc data processing. */
10182 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10183 if (op < 4 && (insn & 0xf000) != 0xf000)
10184 goto illegal_op;
10185 switch (op) {
10186 case 0: /* Register controlled shift. */
8984bd2e
PB
10187 tmp = load_reg(s, rn);
10188 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10189 if ((insn & 0x70) != 0)
10190 goto illegal_op;
10191 op = (insn >> 21) & 3;
8984bd2e
PB
10192 logic_cc = (insn & (1 << 20)) != 0;
10193 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10194 if (logic_cc)
10195 gen_logic_CC(tmp);
bedb8a6b 10196 store_reg(s, rd, tmp);
9ee6e8bb
PB
10197 break;
10198 case 1: /* Sign/zero extend. */
62b44f05
AR
10199 op = (insn >> 20) & 7;
10200 switch (op) {
10201 case 0: /* SXTAH, SXTH */
10202 case 1: /* UXTAH, UXTH */
10203 case 4: /* SXTAB, SXTB */
10204 case 5: /* UXTAB, UXTB */
10205 break;
10206 case 2: /* SXTAB16, SXTB16 */
10207 case 3: /* UXTAB16, UXTB16 */
10208 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10209 goto illegal_op;
10210 }
10211 break;
10212 default:
10213 goto illegal_op;
10214 }
10215 if (rn != 15) {
10216 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10217 goto illegal_op;
10218 }
10219 }
5e3f878a 10220 tmp = load_reg(s, rm);
9ee6e8bb 10221 shift = (insn >> 4) & 3;
1301f322 10222 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10223 rotate, a shift is sufficient. */
10224 if (shift != 0)
f669df27 10225 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10226 op = (insn >> 20) & 7;
10227 switch (op) {
5e3f878a
PB
10228 case 0: gen_sxth(tmp); break;
10229 case 1: gen_uxth(tmp); break;
10230 case 2: gen_sxtb16(tmp); break;
10231 case 3: gen_uxtb16(tmp); break;
10232 case 4: gen_sxtb(tmp); break;
10233 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10234 default:
10235 g_assert_not_reached();
9ee6e8bb
PB
10236 }
10237 if (rn != 15) {
5e3f878a 10238 tmp2 = load_reg(s, rn);
9ee6e8bb 10239 if ((op >> 1) == 1) {
5e3f878a 10240 gen_add16(tmp, tmp2);
9ee6e8bb 10241 } else {
5e3f878a 10242 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10243 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10244 }
10245 }
5e3f878a 10246 store_reg(s, rd, tmp);
9ee6e8bb
PB
10247 break;
10248 case 2: /* SIMD add/subtract. */
62b44f05
AR
10249 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10250 goto illegal_op;
10251 }
9ee6e8bb
PB
10252 op = (insn >> 20) & 7;
10253 shift = (insn >> 4) & 7;
10254 if ((op & 3) == 3 || (shift & 3) == 3)
10255 goto illegal_op;
6ddbc6e4
PB
10256 tmp = load_reg(s, rn);
10257 tmp2 = load_reg(s, rm);
10258 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10259 tcg_temp_free_i32(tmp2);
6ddbc6e4 10260 store_reg(s, rd, tmp);
9ee6e8bb
PB
10261 break;
10262 case 3: /* Other data processing. */
10263 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10264 if (op < 4) {
10265 /* Saturating add/subtract. */
62b44f05
AR
10266 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10267 goto illegal_op;
10268 }
d9ba4830
PB
10269 tmp = load_reg(s, rn);
10270 tmp2 = load_reg(s, rm);
9ee6e8bb 10271 if (op & 1)
9ef39277 10272 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10273 if (op & 2)
9ef39277 10274 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10275 else
9ef39277 10276 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10277 tcg_temp_free_i32(tmp2);
9ee6e8bb 10278 } else {
62b44f05
AR
10279 switch (op) {
10280 case 0x0a: /* rbit */
10281 case 0x08: /* rev */
10282 case 0x09: /* rev16 */
10283 case 0x0b: /* revsh */
10284 case 0x18: /* clz */
10285 break;
10286 case 0x10: /* sel */
10287 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10288 goto illegal_op;
10289 }
10290 break;
10291 case 0x20: /* crc32/crc32c */
10292 case 0x21:
10293 case 0x22:
10294 case 0x28:
10295 case 0x29:
10296 case 0x2a:
10297 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10298 goto illegal_op;
10299 }
10300 break;
10301 default:
10302 goto illegal_op;
10303 }
d9ba4830 10304 tmp = load_reg(s, rn);
9ee6e8bb
PB
10305 switch (op) {
10306 case 0x0a: /* rbit */
d9ba4830 10307 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10308 break;
10309 case 0x08: /* rev */
66896cb8 10310 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10311 break;
10312 case 0x09: /* rev16 */
d9ba4830 10313 gen_rev16(tmp);
9ee6e8bb
PB
10314 break;
10315 case 0x0b: /* revsh */
d9ba4830 10316 gen_revsh(tmp);
9ee6e8bb
PB
10317 break;
10318 case 0x10: /* sel */
d9ba4830 10319 tmp2 = load_reg(s, rm);
7d1b0095 10320 tmp3 = tcg_temp_new_i32();
0ecb72a5 10321 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10322 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10323 tcg_temp_free_i32(tmp3);
10324 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10325 break;
10326 case 0x18: /* clz */
7539a012 10327 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10328 break;
eb0ecd5a
WN
10329 case 0x20:
10330 case 0x21:
10331 case 0x22:
10332 case 0x28:
10333 case 0x29:
10334 case 0x2a:
10335 {
10336 /* crc32/crc32c */
10337 uint32_t sz = op & 0x3;
10338 uint32_t c = op & 0x8;
10339
eb0ecd5a 10340 tmp2 = load_reg(s, rm);
aa633469
PM
10341 if (sz == 0) {
10342 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10343 } else if (sz == 1) {
10344 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10345 }
eb0ecd5a
WN
10346 tmp3 = tcg_const_i32(1 << sz);
10347 if (c) {
10348 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10349 } else {
10350 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10351 }
10352 tcg_temp_free_i32(tmp2);
10353 tcg_temp_free_i32(tmp3);
10354 break;
10355 }
9ee6e8bb 10356 default:
62b44f05 10357 g_assert_not_reached();
9ee6e8bb
PB
10358 }
10359 }
d9ba4830 10360 store_reg(s, rd, tmp);
9ee6e8bb
PB
10361 break;
10362 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10363 switch ((insn >> 20) & 7) {
10364 case 0: /* 32 x 32 -> 32 */
10365 case 7: /* Unsigned sum of absolute differences. */
10366 break;
10367 case 1: /* 16 x 16 -> 32 */
10368 case 2: /* Dual multiply add. */
10369 case 3: /* 32 * 16 -> 32msb */
10370 case 4: /* Dual multiply subtract. */
10371 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10372 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10373 goto illegal_op;
10374 }
10375 break;
10376 }
9ee6e8bb 10377 op = (insn >> 4) & 0xf;
d9ba4830
PB
10378 tmp = load_reg(s, rn);
10379 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10380 switch ((insn >> 20) & 7) {
10381 case 0: /* 32 x 32 -> 32 */
d9ba4830 10382 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10383 tcg_temp_free_i32(tmp2);
9ee6e8bb 10384 if (rs != 15) {
d9ba4830 10385 tmp2 = load_reg(s, rs);
9ee6e8bb 10386 if (op)
d9ba4830 10387 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10388 else
d9ba4830 10389 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10390 tcg_temp_free_i32(tmp2);
9ee6e8bb 10391 }
9ee6e8bb
PB
10392 break;
10393 case 1: /* 16 x 16 -> 32 */
d9ba4830 10394 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10395 tcg_temp_free_i32(tmp2);
9ee6e8bb 10396 if (rs != 15) {
d9ba4830 10397 tmp2 = load_reg(s, rs);
9ef39277 10398 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10399 tcg_temp_free_i32(tmp2);
9ee6e8bb 10400 }
9ee6e8bb
PB
10401 break;
10402 case 2: /* Dual multiply add. */
10403 case 4: /* Dual multiply subtract. */
10404 if (op)
d9ba4830
PB
10405 gen_swap_half(tmp2);
10406 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10407 if (insn & (1 << 22)) {
e1d177b9 10408 /* This subtraction cannot overflow. */
d9ba4830 10409 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10410 } else {
e1d177b9
PM
10411 /* This addition cannot overflow 32 bits;
10412 * however it may overflow considered as a signed
10413 * operation, in which case we must set the Q flag.
10414 */
9ef39277 10415 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10416 }
7d1b0095 10417 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10418 if (rs != 15)
10419 {
d9ba4830 10420 tmp2 = load_reg(s, rs);
9ef39277 10421 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10422 tcg_temp_free_i32(tmp2);
9ee6e8bb 10423 }
9ee6e8bb
PB
10424 break;
10425 case 3: /* 32 * 16 -> 32msb */
10426 if (op)
d9ba4830 10427 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10428 else
d9ba4830 10429 gen_sxth(tmp2);
a7812ae4
PB
10430 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10431 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10432 tmp = tcg_temp_new_i32();
ecc7b3aa 10433 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10434 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10435 if (rs != 15)
10436 {
d9ba4830 10437 tmp2 = load_reg(s, rs);
9ef39277 10438 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10439 tcg_temp_free_i32(tmp2);
9ee6e8bb 10440 }
9ee6e8bb 10441 break;
838fa72d
AJ
10442 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10443 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10444 if (rs != 15) {
838fa72d
AJ
10445 tmp = load_reg(s, rs);
10446 if (insn & (1 << 20)) {
10447 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10448 } else {
838fa72d 10449 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10450 }
2c0262af 10451 }
838fa72d
AJ
10452 if (insn & (1 << 4)) {
10453 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10454 }
10455 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10456 tmp = tcg_temp_new_i32();
ecc7b3aa 10457 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10458 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10459 break;
10460 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10461 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10462 tcg_temp_free_i32(tmp2);
9ee6e8bb 10463 if (rs != 15) {
d9ba4830
PB
10464 tmp2 = load_reg(s, rs);
10465 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10466 tcg_temp_free_i32(tmp2);
5fd46862 10467 }
9ee6e8bb 10468 break;
2c0262af 10469 }
d9ba4830 10470 store_reg(s, rd, tmp);
2c0262af 10471 break;
9ee6e8bb
PB
10472 case 6: case 7: /* 64-bit multiply, Divide. */
10473 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10474 tmp = load_reg(s, rn);
10475 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10476 if ((op & 0x50) == 0x10) {
10477 /* sdiv, udiv */
d614a513 10478 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10479 goto illegal_op;
47789990 10480 }
9ee6e8bb 10481 if (op & 0x20)
5e3f878a 10482 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10483 else
5e3f878a 10484 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10485 tcg_temp_free_i32(tmp2);
5e3f878a 10486 store_reg(s, rd, tmp);
9ee6e8bb
PB
10487 } else if ((op & 0xe) == 0xc) {
10488 /* Dual multiply accumulate long. */
62b44f05
AR
10489 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10490 tcg_temp_free_i32(tmp);
10491 tcg_temp_free_i32(tmp2);
10492 goto illegal_op;
10493 }
9ee6e8bb 10494 if (op & 1)
5e3f878a
PB
10495 gen_swap_half(tmp2);
10496 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10497 if (op & 0x10) {
5e3f878a 10498 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10499 } else {
5e3f878a 10500 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10501 }
7d1b0095 10502 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10503 /* BUGFIX */
10504 tmp64 = tcg_temp_new_i64();
10505 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10506 tcg_temp_free_i32(tmp);
a7812ae4
PB
10507 gen_addq(s, tmp64, rs, rd);
10508 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10509 tcg_temp_free_i64(tmp64);
2c0262af 10510 } else {
9ee6e8bb
PB
10511 if (op & 0x20) {
10512 /* Unsigned 64-bit multiply */
a7812ae4 10513 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10514 } else {
9ee6e8bb
PB
10515 if (op & 8) {
10516 /* smlalxy */
62b44f05
AR
10517 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10518 tcg_temp_free_i32(tmp2);
10519 tcg_temp_free_i32(tmp);
10520 goto illegal_op;
10521 }
5e3f878a 10522 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10523 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10524 tmp64 = tcg_temp_new_i64();
10525 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10526 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10527 } else {
10528 /* Signed 64-bit multiply */
a7812ae4 10529 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10530 }
b5ff1b31 10531 }
9ee6e8bb
PB
10532 if (op & 4) {
10533 /* umaal */
62b44f05
AR
10534 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10535 tcg_temp_free_i64(tmp64);
10536 goto illegal_op;
10537 }
a7812ae4
PB
10538 gen_addq_lo(s, tmp64, rs);
10539 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10540 } else if (op & 0x40) {
10541 /* 64-bit accumulate. */
a7812ae4 10542 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10543 }
a7812ae4 10544 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10545 tcg_temp_free_i64(tmp64);
5fd46862 10546 }
2c0262af 10547 break;
9ee6e8bb
PB
10548 }
10549 break;
10550 case 6: case 7: case 14: case 15:
10551 /* Coprocessor. */
7517748e
PM
10552 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10553 /* We don't currently implement M profile FP support,
10554 * so this entire space should give a NOCP fault.
10555 */
10556 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10557 default_exception_el(s));
10558 break;
10559 }
9ee6e8bb
PB
10560 if (((insn >> 24) & 3) == 3) {
10561 /* Translate into the equivalent ARM encoding. */
f06053e3 10562 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10563 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10564 goto illegal_op;
7dcc1f89 10565 }
6a57f3eb 10566 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10567 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10568 goto illegal_op;
10569 }
9ee6e8bb
PB
10570 } else {
10571 if (insn & (1 << 28))
10572 goto illegal_op;
7dcc1f89 10573 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10574 goto illegal_op;
7dcc1f89 10575 }
9ee6e8bb
PB
10576 }
10577 break;
10578 case 8: case 9: case 10: case 11:
10579 if (insn & (1 << 15)) {
10580 /* Branches, misc control. */
10581 if (insn & 0x5000) {
10582 /* Unconditional branch. */
10583 /* signextend(hw1[10:0]) -> offset[:12]. */
10584 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10585 /* hw1[10:0] -> offset[11:1]. */
10586 offset |= (insn & 0x7ff) << 1;
10587 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10588 offset[24:22] already have the same value because of the
10589 sign extension above. */
10590 offset ^= ((~insn) & (1 << 13)) << 10;
10591 offset ^= ((~insn) & (1 << 11)) << 11;
10592
9ee6e8bb
PB
10593 if (insn & (1 << 14)) {
10594 /* Branch and link. */
3174f8e9 10595 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10596 }
3b46e624 10597
b0109805 10598 offset += s->pc;
9ee6e8bb
PB
10599 if (insn & (1 << 12)) {
10600 /* b/bl */
b0109805 10601 gen_jmp(s, offset);
9ee6e8bb
PB
10602 } else {
10603 /* blx */
b0109805 10604 offset &= ~(uint32_t)2;
be5e7a76 10605 /* thumb2 bx, no need to check */
b0109805 10606 gen_bx_im(s, offset);
2c0262af 10607 }
9ee6e8bb
PB
10608 } else if (((insn >> 23) & 7) == 7) {
10609 /* Misc control */
10610 if (insn & (1 << 13))
10611 goto illegal_op;
10612
10613 if (insn & (1 << 26)) {
001b3cab
PM
10614 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10615 goto illegal_op;
10616 }
37e6456e
PM
10617 if (!(insn & (1 << 20))) {
10618 /* Hypervisor call (v7) */
10619 int imm16 = extract32(insn, 16, 4) << 12
10620 | extract32(insn, 0, 12);
10621 ARCH(7);
10622 if (IS_USER(s)) {
10623 goto illegal_op;
10624 }
10625 gen_hvc(s, imm16);
10626 } else {
10627 /* Secure monitor call (v6+) */
10628 ARCH(6K);
10629 if (IS_USER(s)) {
10630 goto illegal_op;
10631 }
10632 gen_smc(s);
10633 }
2c0262af 10634 } else {
9ee6e8bb
PB
10635 op = (insn >> 20) & 7;
10636 switch (op) {
10637 case 0: /* msr cpsr. */
b53d8923 10638 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10639 tmp = load_reg(s, rn);
b28b3377
PM
10640 /* the constant is the mask and SYSm fields */
10641 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10642 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10643 tcg_temp_free_i32(addr);
7d1b0095 10644 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10645 gen_lookup_tb(s);
10646 break;
10647 }
10648 /* fall through */
10649 case 1: /* msr spsr. */
b53d8923 10650 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10651 goto illegal_op;
b53d8923 10652 }
8bfd0550
PM
10653
10654 if (extract32(insn, 5, 1)) {
10655 /* MSR (banked) */
10656 int sysm = extract32(insn, 8, 4) |
10657 (extract32(insn, 4, 1) << 4);
10658 int r = op & 1;
10659
10660 gen_msr_banked(s, r, sysm, rm);
10661 break;
10662 }
10663
10664 /* MSR (for PSRs) */
2fbac54b
FN
10665 tmp = load_reg(s, rn);
10666 if (gen_set_psr(s,
7dcc1f89 10667 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10668 op == 1, tmp))
9ee6e8bb
PB
10669 goto illegal_op;
10670 break;
10671 case 2: /* cps, nop-hint. */
10672 if (((insn >> 8) & 7) == 0) {
10673 gen_nop_hint(s, insn & 0xff);
10674 }
10675 /* Implemented as NOP in user mode. */
10676 if (IS_USER(s))
10677 break;
10678 offset = 0;
10679 imm = 0;
10680 if (insn & (1 << 10)) {
10681 if (insn & (1 << 7))
10682 offset |= CPSR_A;
10683 if (insn & (1 << 6))
10684 offset |= CPSR_I;
10685 if (insn & (1 << 5))
10686 offset |= CPSR_F;
10687 if (insn & (1 << 9))
10688 imm = CPSR_A | CPSR_I | CPSR_F;
10689 }
10690 if (insn & (1 << 8)) {
10691 offset |= 0x1f;
10692 imm |= (insn & 0x1f);
10693 }
10694 if (offset) {
2fbac54b 10695 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10696 }
10697 break;
10698 case 3: /* Special control operations. */
426f5abc 10699 ARCH(7);
9ee6e8bb
PB
10700 op = (insn >> 4) & 0xf;
10701 switch (op) {
10702 case 2: /* clrex */
426f5abc 10703 gen_clrex(s);
9ee6e8bb
PB
10704 break;
10705 case 4: /* dsb */
10706 case 5: /* dmb */
61e4c432 10707 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10708 break;
6df99dec
SS
10709 case 6: /* isb */
10710 /* We need to break the TB after this insn
10711 * to execute self-modifying code correctly
10712 * and also to take any pending interrupts
10713 * immediately.
10714 */
0b609cc1 10715 gen_goto_tb(s, 0, s->pc & ~1);
6df99dec 10716 break;
9ee6e8bb
PB
10717 default:
10718 goto illegal_op;
10719 }
10720 break;
10721 case 4: /* bxj */
9d7c59c8
PM
10722 /* Trivial implementation equivalent to bx.
10723 * This instruction doesn't exist at all for M-profile.
10724 */
10725 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10726 goto illegal_op;
10727 }
d9ba4830
PB
10728 tmp = load_reg(s, rn);
10729 gen_bx(s, tmp);
9ee6e8bb
PB
10730 break;
10731 case 5: /* Exception return. */
b8b45b68
RV
10732 if (IS_USER(s)) {
10733 goto illegal_op;
10734 }
10735 if (rn != 14 || rd != 15) {
10736 goto illegal_op;
10737 }
10738 tmp = load_reg(s, rn);
10739 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10740 gen_exception_return(s, tmp);
10741 break;
8bfd0550 10742 case 6: /* MRS */
43ac6574
PM
10743 if (extract32(insn, 5, 1) &&
10744 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10745 /* MRS (banked) */
10746 int sysm = extract32(insn, 16, 4) |
10747 (extract32(insn, 4, 1) << 4);
10748
10749 gen_mrs_banked(s, 0, sysm, rd);
10750 break;
10751 }
10752
3d54026f
PM
10753 if (extract32(insn, 16, 4) != 0xf) {
10754 goto illegal_op;
10755 }
10756 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10757 extract32(insn, 0, 8) != 0) {
10758 goto illegal_op;
10759 }
10760
8bfd0550 10761 /* mrs cpsr */
7d1b0095 10762 tmp = tcg_temp_new_i32();
b53d8923 10763 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10764 addr = tcg_const_i32(insn & 0xff);
10765 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10766 tcg_temp_free_i32(addr);
9ee6e8bb 10767 } else {
9ef39277 10768 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10769 }
8984bd2e 10770 store_reg(s, rd, tmp);
9ee6e8bb 10771 break;
8bfd0550 10772 case 7: /* MRS */
43ac6574
PM
10773 if (extract32(insn, 5, 1) &&
10774 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10775 /* MRS (banked) */
10776 int sysm = extract32(insn, 16, 4) |
10777 (extract32(insn, 4, 1) << 4);
10778
10779 gen_mrs_banked(s, 1, sysm, rd);
10780 break;
10781 }
10782
10783 /* mrs spsr. */
9ee6e8bb 10784 /* Not accessible in user mode. */
b53d8923 10785 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10786 goto illegal_op;
b53d8923 10787 }
3d54026f
PM
10788
10789 if (extract32(insn, 16, 4) != 0xf ||
10790 extract32(insn, 0, 8) != 0) {
10791 goto illegal_op;
10792 }
10793
d9ba4830
PB
10794 tmp = load_cpu_field(spsr);
10795 store_reg(s, rd, tmp);
9ee6e8bb 10796 break;
2c0262af
FB
10797 }
10798 }
9ee6e8bb
PB
10799 } else {
10800 /* Conditional branch. */
10801 op = (insn >> 22) & 0xf;
10802 /* Generate a conditional jump to next instruction. */
10803 s->condlabel = gen_new_label();
39fb730a 10804 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10805 s->condjmp = 1;
10806
10807 /* offset[11:1] = insn[10:0] */
10808 offset = (insn & 0x7ff) << 1;
10809 /* offset[17:12] = insn[21:16]. */
10810 offset |= (insn & 0x003f0000) >> 4;
10811 /* offset[31:20] = insn[26]. */
10812 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10813 /* offset[18] = insn[13]. */
10814 offset |= (insn & (1 << 13)) << 5;
10815 /* offset[19] = insn[11]. */
10816 offset |= (insn & (1 << 11)) << 8;
10817
10818 /* jump to the offset */
b0109805 10819 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10820 }
10821 } else {
10822 /* Data processing immediate. */
10823 if (insn & (1 << 25)) {
10824 if (insn & (1 << 24)) {
10825 if (insn & (1 << 20))
10826 goto illegal_op;
10827 /* Bitfield/Saturate. */
10828 op = (insn >> 21) & 7;
10829 imm = insn & 0x1f;
10830 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10831 if (rn == 15) {
7d1b0095 10832 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10833 tcg_gen_movi_i32(tmp, 0);
10834 } else {
10835 tmp = load_reg(s, rn);
10836 }
9ee6e8bb
PB
10837 switch (op) {
10838 case 2: /* Signed bitfield extract. */
10839 imm++;
10840 if (shift + imm > 32)
10841 goto illegal_op;
59a71b4c
RH
10842 if (imm < 32) {
10843 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10844 }
9ee6e8bb
PB
10845 break;
10846 case 6: /* Unsigned bitfield extract. */
10847 imm++;
10848 if (shift + imm > 32)
10849 goto illegal_op;
59a71b4c
RH
10850 if (imm < 32) {
10851 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10852 }
9ee6e8bb
PB
10853 break;
10854 case 3: /* Bitfield insert/clear. */
10855 if (imm < shift)
10856 goto illegal_op;
10857 imm = imm + 1 - shift;
10858 if (imm != 32) {
6ddbc6e4 10859 tmp2 = load_reg(s, rd);
d593c48e 10860 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10861 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10862 }
10863 break;
10864 case 7:
10865 goto illegal_op;
10866 default: /* Saturate. */
9ee6e8bb
PB
10867 if (shift) {
10868 if (op & 1)
6ddbc6e4 10869 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10870 else
6ddbc6e4 10871 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10872 }
6ddbc6e4 10873 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10874 if (op & 4) {
10875 /* Unsigned. */
62b44f05
AR
10876 if ((op & 1) && shift == 0) {
10877 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10878 tcg_temp_free_i32(tmp);
10879 tcg_temp_free_i32(tmp2);
10880 goto illegal_op;
10881 }
9ef39277 10882 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10883 } else {
9ef39277 10884 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10885 }
2c0262af 10886 } else {
9ee6e8bb 10887 /* Signed. */
62b44f05
AR
10888 if ((op & 1) && shift == 0) {
10889 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10890 tcg_temp_free_i32(tmp);
10891 tcg_temp_free_i32(tmp2);
10892 goto illegal_op;
10893 }
9ef39277 10894 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10895 } else {
9ef39277 10896 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10897 }
2c0262af 10898 }
b75263d6 10899 tcg_temp_free_i32(tmp2);
9ee6e8bb 10900 break;
2c0262af 10901 }
6ddbc6e4 10902 store_reg(s, rd, tmp);
9ee6e8bb
PB
10903 } else {
10904 imm = ((insn & 0x04000000) >> 15)
10905 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10906 if (insn & (1 << 22)) {
10907 /* 16-bit immediate. */
10908 imm |= (insn >> 4) & 0xf000;
10909 if (insn & (1 << 23)) {
10910 /* movt */
5e3f878a 10911 tmp = load_reg(s, rd);
86831435 10912 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10913 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10914 } else {
9ee6e8bb 10915 /* movw */
7d1b0095 10916 tmp = tcg_temp_new_i32();
5e3f878a 10917 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10918 }
10919 } else {
9ee6e8bb
PB
10920 /* Add/sub 12-bit immediate. */
10921 if (rn == 15) {
b0109805 10922 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10923 if (insn & (1 << 23))
b0109805 10924 offset -= imm;
9ee6e8bb 10925 else
b0109805 10926 offset += imm;
7d1b0095 10927 tmp = tcg_temp_new_i32();
5e3f878a 10928 tcg_gen_movi_i32(tmp, offset);
2c0262af 10929 } else {
5e3f878a 10930 tmp = load_reg(s, rn);
9ee6e8bb 10931 if (insn & (1 << 23))
5e3f878a 10932 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10933 else
5e3f878a 10934 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10935 }
9ee6e8bb 10936 }
5e3f878a 10937 store_reg(s, rd, tmp);
191abaa2 10938 }
9ee6e8bb
PB
10939 } else {
10940 int shifter_out = 0;
10941 /* modified 12-bit immediate. */
10942 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10943 imm = (insn & 0xff);
10944 switch (shift) {
10945 case 0: /* XY */
10946 /* Nothing to do. */
10947 break;
10948 case 1: /* 00XY00XY */
10949 imm |= imm << 16;
10950 break;
10951 case 2: /* XY00XY00 */
10952 imm |= imm << 16;
10953 imm <<= 8;
10954 break;
10955 case 3: /* XYXYXYXY */
10956 imm |= imm << 16;
10957 imm |= imm << 8;
10958 break;
10959 default: /* Rotated constant. */
10960 shift = (shift << 1) | (imm >> 7);
10961 imm |= 0x80;
10962 imm = imm << (32 - shift);
10963 shifter_out = 1;
10964 break;
b5ff1b31 10965 }
7d1b0095 10966 tmp2 = tcg_temp_new_i32();
3174f8e9 10967 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10968 rn = (insn >> 16) & 0xf;
3174f8e9 10969 if (rn == 15) {
7d1b0095 10970 tmp = tcg_temp_new_i32();
3174f8e9
FN
10971 tcg_gen_movi_i32(tmp, 0);
10972 } else {
10973 tmp = load_reg(s, rn);
10974 }
9ee6e8bb
PB
10975 op = (insn >> 21) & 0xf;
10976 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10977 shifter_out, tmp, tmp2))
9ee6e8bb 10978 goto illegal_op;
7d1b0095 10979 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10980 rd = (insn >> 8) & 0xf;
10981 if (rd != 15) {
3174f8e9
FN
10982 store_reg(s, rd, tmp);
10983 } else {
7d1b0095 10984 tcg_temp_free_i32(tmp);
2c0262af 10985 }
2c0262af 10986 }
9ee6e8bb
PB
10987 }
10988 break;
10989 case 12: /* Load/store single data item. */
10990 {
10991 int postinc = 0;
10992 int writeback = 0;
a99caa48 10993 int memidx;
9bb6558a
PM
10994 ISSInfo issinfo;
10995
9ee6e8bb 10996 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10997 if (disas_neon_ls_insn(s, insn)) {
c1713132 10998 goto illegal_op;
7dcc1f89 10999 }
9ee6e8bb
PB
11000 break;
11001 }
a2fdc890
PM
11002 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
11003 if (rs == 15) {
11004 if (!(insn & (1 << 20))) {
11005 goto illegal_op;
11006 }
11007 if (op != 2) {
11008 /* Byte or halfword load space with dest == r15 : memory hints.
11009 * Catch them early so we don't emit pointless addressing code.
11010 * This space is a mix of:
11011 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
11012 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
11013 * cores)
11014 * unallocated hints, which must be treated as NOPs
11015 * UNPREDICTABLE space, which we NOP or UNDEF depending on
11016 * which is easiest for the decoding logic
11017 * Some space which must UNDEF
11018 */
11019 int op1 = (insn >> 23) & 3;
11020 int op2 = (insn >> 6) & 0x3f;
11021 if (op & 2) {
11022 goto illegal_op;
11023 }
11024 if (rn == 15) {
02afbf64
PM
11025 /* UNPREDICTABLE, unallocated hint or
11026 * PLD/PLDW/PLI (literal)
11027 */
2eea841c 11028 return;
a2fdc890
PM
11029 }
11030 if (op1 & 1) {
2eea841c 11031 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11032 }
11033 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
2eea841c 11034 return; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
11035 }
11036 /* UNDEF space, or an UNPREDICTABLE */
2eea841c 11037 goto illegal_op;
a2fdc890
PM
11038 }
11039 }
a99caa48 11040 memidx = get_mem_index(s);
9ee6e8bb 11041 if (rn == 15) {
7d1b0095 11042 addr = tcg_temp_new_i32();
9ee6e8bb
PB
11043 /* PC relative. */
11044 /* s->pc has already been incremented by 4. */
11045 imm = s->pc & 0xfffffffc;
11046 if (insn & (1 << 23))
11047 imm += insn & 0xfff;
11048 else
11049 imm -= insn & 0xfff;
b0109805 11050 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 11051 } else {
b0109805 11052 addr = load_reg(s, rn);
9ee6e8bb
PB
11053 if (insn & (1 << 23)) {
11054 /* Positive offset. */
11055 imm = insn & 0xfff;
b0109805 11056 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 11057 } else {
9ee6e8bb 11058 imm = insn & 0xff;
2a0308c5
PM
11059 switch ((insn >> 8) & 0xf) {
11060 case 0x0: /* Shifted Register. */
9ee6e8bb 11061 shift = (insn >> 4) & 0xf;
2a0308c5
PM
11062 if (shift > 3) {
11063 tcg_temp_free_i32(addr);
18c9b560 11064 goto illegal_op;
2a0308c5 11065 }
b26eefb6 11066 tmp = load_reg(s, rm);
9ee6e8bb 11067 if (shift)
b26eefb6 11068 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 11069 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11070 tcg_temp_free_i32(tmp);
9ee6e8bb 11071 break;
2a0308c5 11072 case 0xc: /* Negative offset. */
b0109805 11073 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 11074 break;
2a0308c5 11075 case 0xe: /* User privilege. */
b0109805 11076 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 11077 memidx = get_a32_user_mem_index(s);
9ee6e8bb 11078 break;
2a0308c5 11079 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
11080 imm = -imm;
11081 /* Fall through. */
2a0308c5 11082 case 0xb: /* Post-increment. */
9ee6e8bb
PB
11083 postinc = 1;
11084 writeback = 1;
11085 break;
2a0308c5 11086 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
11087 imm = -imm;
11088 /* Fall through. */
2a0308c5 11089 case 0xf: /* Pre-increment. */
b0109805 11090 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
11091 writeback = 1;
11092 break;
11093 default:
2a0308c5 11094 tcg_temp_free_i32(addr);
b7bcbe95 11095 goto illegal_op;
9ee6e8bb
PB
11096 }
11097 }
11098 }
9bb6558a
PM
11099
11100 issinfo = writeback ? ISSInvalid : rs;
11101
9ee6e8bb
PB
11102 if (insn & (1 << 20)) {
11103 /* Load. */
5a839c0d 11104 tmp = tcg_temp_new_i32();
a2fdc890 11105 switch (op) {
5a839c0d 11106 case 0:
9bb6558a 11107 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11108 break;
11109 case 4:
9bb6558a 11110 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11111 break;
11112 case 1:
9bb6558a 11113 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11114 break;
11115 case 5:
9bb6558a 11116 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11117 break;
11118 case 2:
9bb6558a 11119 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11120 break;
2a0308c5 11121 default:
5a839c0d 11122 tcg_temp_free_i32(tmp);
2a0308c5
PM
11123 tcg_temp_free_i32(addr);
11124 goto illegal_op;
a2fdc890
PM
11125 }
11126 if (rs == 15) {
3bb8a96f 11127 gen_bx_excret(s, tmp);
9ee6e8bb 11128 } else {
a2fdc890 11129 store_reg(s, rs, tmp);
9ee6e8bb
PB
11130 }
11131 } else {
11132 /* Store. */
b0109805 11133 tmp = load_reg(s, rs);
9ee6e8bb 11134 switch (op) {
5a839c0d 11135 case 0:
9bb6558a 11136 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11137 break;
11138 case 1:
9bb6558a 11139 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
11140 break;
11141 case 2:
9bb6558a 11142 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 11143 break;
2a0308c5 11144 default:
5a839c0d 11145 tcg_temp_free_i32(tmp);
2a0308c5
PM
11146 tcg_temp_free_i32(addr);
11147 goto illegal_op;
b7bcbe95 11148 }
5a839c0d 11149 tcg_temp_free_i32(tmp);
2c0262af 11150 }
9ee6e8bb 11151 if (postinc)
b0109805
PB
11152 tcg_gen_addi_i32(addr, addr, imm);
11153 if (writeback) {
11154 store_reg(s, rn, addr);
11155 } else {
7d1b0095 11156 tcg_temp_free_i32(addr);
b0109805 11157 }
9ee6e8bb
PB
11158 }
11159 break;
11160 default:
11161 goto illegal_op;
2c0262af 11162 }
2eea841c 11163 return;
9ee6e8bb 11164illegal_op:
2eea841c
PM
11165 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11166 default_exception_el(s));
2c0262af
FB
11167}
11168
296e5a0a 11169static void disas_thumb_insn(DisasContext *s, uint32_t insn)
99c475ab 11170{
296e5a0a 11171 uint32_t val, op, rm, rn, rd, shift, cond;
99c475ab
FB
11172 int32_t offset;
11173 int i;
39d5492a
PM
11174 TCGv_i32 tmp;
11175 TCGv_i32 tmp2;
11176 TCGv_i32 addr;
99c475ab 11177
99c475ab
FB
11178 switch (insn >> 12) {
11179 case 0: case 1:
396e467c 11180
99c475ab
FB
11181 rd = insn & 7;
11182 op = (insn >> 11) & 3;
11183 if (op == 3) {
11184 /* add/subtract */
11185 rn = (insn >> 3) & 7;
396e467c 11186 tmp = load_reg(s, rn);
99c475ab
FB
11187 if (insn & (1 << 10)) {
11188 /* immediate */
7d1b0095 11189 tmp2 = tcg_temp_new_i32();
396e467c 11190 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11191 } else {
11192 /* reg */
11193 rm = (insn >> 6) & 7;
396e467c 11194 tmp2 = load_reg(s, rm);
99c475ab 11195 }
9ee6e8bb
PB
11196 if (insn & (1 << 9)) {
11197 if (s->condexec_mask)
396e467c 11198 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11199 else
72485ec4 11200 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11201 } else {
11202 if (s->condexec_mask)
396e467c 11203 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11204 else
72485ec4 11205 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11206 }
7d1b0095 11207 tcg_temp_free_i32(tmp2);
396e467c 11208 store_reg(s, rd, tmp);
99c475ab
FB
11209 } else {
11210 /* shift immediate */
11211 rm = (insn >> 3) & 7;
11212 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11213 tmp = load_reg(s, rm);
11214 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11215 if (!s->condexec_mask)
11216 gen_logic_CC(tmp);
11217 store_reg(s, rd, tmp);
99c475ab
FB
11218 }
11219 break;
11220 case 2: case 3:
11221 /* arithmetic large immediate */
11222 op = (insn >> 11) & 3;
11223 rd = (insn >> 8) & 0x7;
396e467c 11224 if (op == 0) { /* mov */
7d1b0095 11225 tmp = tcg_temp_new_i32();
396e467c 11226 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11227 if (!s->condexec_mask)
396e467c
FN
11228 gen_logic_CC(tmp);
11229 store_reg(s, rd, tmp);
11230 } else {
11231 tmp = load_reg(s, rd);
7d1b0095 11232 tmp2 = tcg_temp_new_i32();
396e467c
FN
11233 tcg_gen_movi_i32(tmp2, insn & 0xff);
11234 switch (op) {
11235 case 1: /* cmp */
72485ec4 11236 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11237 tcg_temp_free_i32(tmp);
11238 tcg_temp_free_i32(tmp2);
396e467c
FN
11239 break;
11240 case 2: /* add */
11241 if (s->condexec_mask)
11242 tcg_gen_add_i32(tmp, tmp, tmp2);
11243 else
72485ec4 11244 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11245 tcg_temp_free_i32(tmp2);
396e467c
FN
11246 store_reg(s, rd, tmp);
11247 break;
11248 case 3: /* sub */
11249 if (s->condexec_mask)
11250 tcg_gen_sub_i32(tmp, tmp, tmp2);
11251 else
72485ec4 11252 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11253 tcg_temp_free_i32(tmp2);
396e467c
FN
11254 store_reg(s, rd, tmp);
11255 break;
11256 }
99c475ab 11257 }
99c475ab
FB
11258 break;
11259 case 4:
11260 if (insn & (1 << 11)) {
11261 rd = (insn >> 8) & 7;
5899f386
FB
11262 /* load pc-relative. Bit 1 of PC is ignored. */
11263 val = s->pc + 2 + ((insn & 0xff) * 4);
11264 val &= ~(uint32_t)2;
7d1b0095 11265 addr = tcg_temp_new_i32();
b0109805 11266 tcg_gen_movi_i32(addr, val);
c40c8556 11267 tmp = tcg_temp_new_i32();
9bb6558a
PM
11268 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11269 rd | ISSIs16Bit);
7d1b0095 11270 tcg_temp_free_i32(addr);
b0109805 11271 store_reg(s, rd, tmp);
99c475ab
FB
11272 break;
11273 }
11274 if (insn & (1 << 10)) {
ebfe27c5
PM
11275 /* 0b0100_01xx_xxxx_xxxx
11276 * - data processing extended, branch and exchange
11277 */
99c475ab
FB
11278 rd = (insn & 7) | ((insn >> 4) & 8);
11279 rm = (insn >> 3) & 0xf;
11280 op = (insn >> 8) & 3;
11281 switch (op) {
11282 case 0: /* add */
396e467c
FN
11283 tmp = load_reg(s, rd);
11284 tmp2 = load_reg(s, rm);
11285 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11286 tcg_temp_free_i32(tmp2);
396e467c 11287 store_reg(s, rd, tmp);
99c475ab
FB
11288 break;
11289 case 1: /* cmp */
396e467c
FN
11290 tmp = load_reg(s, rd);
11291 tmp2 = load_reg(s, rm);
72485ec4 11292 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11293 tcg_temp_free_i32(tmp2);
11294 tcg_temp_free_i32(tmp);
99c475ab
FB
11295 break;
11296 case 2: /* mov/cpy */
396e467c
FN
11297 tmp = load_reg(s, rm);
11298 store_reg(s, rd, tmp);
99c475ab 11299 break;
ebfe27c5
PM
11300 case 3:
11301 {
11302 /* 0b0100_0111_xxxx_xxxx
11303 * - branch [and link] exchange thumb register
11304 */
11305 bool link = insn & (1 << 7);
11306
fb602cb7 11307 if (insn & 3) {
ebfe27c5
PM
11308 goto undef;
11309 }
11310 if (link) {
be5e7a76 11311 ARCH(5);
ebfe27c5 11312 }
fb602cb7
PM
11313 if ((insn & 4)) {
11314 /* BXNS/BLXNS: only exists for v8M with the
11315 * security extensions, and always UNDEF if NonSecure.
11316 * We don't implement these in the user-only mode
11317 * either (in theory you can use them from Secure User
11318 * mode but they are too tied in to system emulation.)
11319 */
11320 if (!s->v8m_secure || IS_USER_ONLY) {
11321 goto undef;
11322 }
11323 if (link) {
3e3fa230 11324 gen_blxns(s, rm);
fb602cb7
PM
11325 } else {
11326 gen_bxns(s, rm);
11327 }
11328 break;
11329 }
11330 /* BLX/BX */
ebfe27c5
PM
11331 tmp = load_reg(s, rm);
11332 if (link) {
99c475ab 11333 val = (uint32_t)s->pc | 1;
7d1b0095 11334 tmp2 = tcg_temp_new_i32();
b0109805
PB
11335 tcg_gen_movi_i32(tmp2, val);
11336 store_reg(s, 14, tmp2);
3bb8a96f
PM
11337 gen_bx(s, tmp);
11338 } else {
11339 /* Only BX works as exception-return, not BLX */
11340 gen_bx_excret(s, tmp);
99c475ab 11341 }
99c475ab
FB
11342 break;
11343 }
ebfe27c5 11344 }
99c475ab
FB
11345 break;
11346 }
11347
11348 /* data processing register */
11349 rd = insn & 7;
11350 rm = (insn >> 3) & 7;
11351 op = (insn >> 6) & 0xf;
11352 if (op == 2 || op == 3 || op == 4 || op == 7) {
11353 /* the shift/rotate ops want the operands backwards */
11354 val = rm;
11355 rm = rd;
11356 rd = val;
11357 val = 1;
11358 } else {
11359 val = 0;
11360 }
11361
396e467c 11362 if (op == 9) { /* neg */
7d1b0095 11363 tmp = tcg_temp_new_i32();
396e467c
FN
11364 tcg_gen_movi_i32(tmp, 0);
11365 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11366 tmp = load_reg(s, rd);
11367 } else {
f764718d 11368 tmp = NULL;
396e467c 11369 }
99c475ab 11370
396e467c 11371 tmp2 = load_reg(s, rm);
5899f386 11372 switch (op) {
99c475ab 11373 case 0x0: /* and */
396e467c 11374 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11375 if (!s->condexec_mask)
396e467c 11376 gen_logic_CC(tmp);
99c475ab
FB
11377 break;
11378 case 0x1: /* eor */
396e467c 11379 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11380 if (!s->condexec_mask)
396e467c 11381 gen_logic_CC(tmp);
99c475ab
FB
11382 break;
11383 case 0x2: /* lsl */
9ee6e8bb 11384 if (s->condexec_mask) {
365af80e 11385 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11386 } else {
9ef39277 11387 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11388 gen_logic_CC(tmp2);
9ee6e8bb 11389 }
99c475ab
FB
11390 break;
11391 case 0x3: /* lsr */
9ee6e8bb 11392 if (s->condexec_mask) {
365af80e 11393 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11394 } else {
9ef39277 11395 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11396 gen_logic_CC(tmp2);
9ee6e8bb 11397 }
99c475ab
FB
11398 break;
11399 case 0x4: /* asr */
9ee6e8bb 11400 if (s->condexec_mask) {
365af80e 11401 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11402 } else {
9ef39277 11403 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11404 gen_logic_CC(tmp2);
9ee6e8bb 11405 }
99c475ab
FB
11406 break;
11407 case 0x5: /* adc */
49b4c31e 11408 if (s->condexec_mask) {
396e467c 11409 gen_adc(tmp, tmp2);
49b4c31e
RH
11410 } else {
11411 gen_adc_CC(tmp, tmp, tmp2);
11412 }
99c475ab
FB
11413 break;
11414 case 0x6: /* sbc */
2de68a49 11415 if (s->condexec_mask) {
396e467c 11416 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11417 } else {
11418 gen_sbc_CC(tmp, tmp, tmp2);
11419 }
99c475ab
FB
11420 break;
11421 case 0x7: /* ror */
9ee6e8bb 11422 if (s->condexec_mask) {
f669df27
AJ
11423 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11424 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11425 } else {
9ef39277 11426 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11427 gen_logic_CC(tmp2);
9ee6e8bb 11428 }
99c475ab
FB
11429 break;
11430 case 0x8: /* tst */
396e467c
FN
11431 tcg_gen_and_i32(tmp, tmp, tmp2);
11432 gen_logic_CC(tmp);
99c475ab 11433 rd = 16;
5899f386 11434 break;
99c475ab 11435 case 0x9: /* neg */
9ee6e8bb 11436 if (s->condexec_mask)
396e467c 11437 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11438 else
72485ec4 11439 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11440 break;
11441 case 0xa: /* cmp */
72485ec4 11442 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11443 rd = 16;
11444 break;
11445 case 0xb: /* cmn */
72485ec4 11446 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11447 rd = 16;
11448 break;
11449 case 0xc: /* orr */
396e467c 11450 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11451 if (!s->condexec_mask)
396e467c 11452 gen_logic_CC(tmp);
99c475ab
FB
11453 break;
11454 case 0xd: /* mul */
7b2919a0 11455 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11456 if (!s->condexec_mask)
396e467c 11457 gen_logic_CC(tmp);
99c475ab
FB
11458 break;
11459 case 0xe: /* bic */
f669df27 11460 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11461 if (!s->condexec_mask)
396e467c 11462 gen_logic_CC(tmp);
99c475ab
FB
11463 break;
11464 case 0xf: /* mvn */
396e467c 11465 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11466 if (!s->condexec_mask)
396e467c 11467 gen_logic_CC(tmp2);
99c475ab 11468 val = 1;
5899f386 11469 rm = rd;
99c475ab
FB
11470 break;
11471 }
11472 if (rd != 16) {
396e467c
FN
11473 if (val) {
11474 store_reg(s, rm, tmp2);
11475 if (op != 0xf)
7d1b0095 11476 tcg_temp_free_i32(tmp);
396e467c
FN
11477 } else {
11478 store_reg(s, rd, tmp);
7d1b0095 11479 tcg_temp_free_i32(tmp2);
396e467c
FN
11480 }
11481 } else {
7d1b0095
PM
11482 tcg_temp_free_i32(tmp);
11483 tcg_temp_free_i32(tmp2);
99c475ab
FB
11484 }
11485 break;
11486
11487 case 5:
11488 /* load/store register offset. */
11489 rd = insn & 7;
11490 rn = (insn >> 3) & 7;
11491 rm = (insn >> 6) & 7;
11492 op = (insn >> 9) & 7;
b0109805 11493 addr = load_reg(s, rn);
b26eefb6 11494 tmp = load_reg(s, rm);
b0109805 11495 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11496 tcg_temp_free_i32(tmp);
99c475ab 11497
c40c8556 11498 if (op < 3) { /* store */
b0109805 11499 tmp = load_reg(s, rd);
c40c8556
PM
11500 } else {
11501 tmp = tcg_temp_new_i32();
11502 }
99c475ab
FB
11503
11504 switch (op) {
11505 case 0: /* str */
9bb6558a 11506 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11507 break;
11508 case 1: /* strh */
9bb6558a 11509 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11510 break;
11511 case 2: /* strb */
9bb6558a 11512 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11513 break;
11514 case 3: /* ldrsb */
9bb6558a 11515 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11516 break;
11517 case 4: /* ldr */
9bb6558a 11518 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11519 break;
11520 case 5: /* ldrh */
9bb6558a 11521 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11522 break;
11523 case 6: /* ldrb */
9bb6558a 11524 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11525 break;
11526 case 7: /* ldrsh */
9bb6558a 11527 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11528 break;
11529 }
c40c8556 11530 if (op >= 3) { /* load */
b0109805 11531 store_reg(s, rd, tmp);
c40c8556
PM
11532 } else {
11533 tcg_temp_free_i32(tmp);
11534 }
7d1b0095 11535 tcg_temp_free_i32(addr);
99c475ab
FB
11536 break;
11537
11538 case 6:
11539 /* load/store word immediate offset */
11540 rd = insn & 7;
11541 rn = (insn >> 3) & 7;
b0109805 11542 addr = load_reg(s, rn);
99c475ab 11543 val = (insn >> 4) & 0x7c;
b0109805 11544 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11545
11546 if (insn & (1 << 11)) {
11547 /* load */
c40c8556 11548 tmp = tcg_temp_new_i32();
12dcc321 11549 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11550 store_reg(s, rd, tmp);
99c475ab
FB
11551 } else {
11552 /* store */
b0109805 11553 tmp = load_reg(s, rd);
12dcc321 11554 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11555 tcg_temp_free_i32(tmp);
99c475ab 11556 }
7d1b0095 11557 tcg_temp_free_i32(addr);
99c475ab
FB
11558 break;
11559
11560 case 7:
11561 /* load/store byte immediate offset */
11562 rd = insn & 7;
11563 rn = (insn >> 3) & 7;
b0109805 11564 addr = load_reg(s, rn);
99c475ab 11565 val = (insn >> 6) & 0x1f;
b0109805 11566 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11567
11568 if (insn & (1 << 11)) {
11569 /* load */
c40c8556 11570 tmp = tcg_temp_new_i32();
9bb6558a 11571 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11572 store_reg(s, rd, tmp);
99c475ab
FB
11573 } else {
11574 /* store */
b0109805 11575 tmp = load_reg(s, rd);
9bb6558a 11576 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11577 tcg_temp_free_i32(tmp);
99c475ab 11578 }
7d1b0095 11579 tcg_temp_free_i32(addr);
99c475ab
FB
11580 break;
11581
11582 case 8:
11583 /* load/store halfword immediate offset */
11584 rd = insn & 7;
11585 rn = (insn >> 3) & 7;
b0109805 11586 addr = load_reg(s, rn);
99c475ab 11587 val = (insn >> 5) & 0x3e;
b0109805 11588 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11589
11590 if (insn & (1 << 11)) {
11591 /* load */
c40c8556 11592 tmp = tcg_temp_new_i32();
9bb6558a 11593 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11594 store_reg(s, rd, tmp);
99c475ab
FB
11595 } else {
11596 /* store */
b0109805 11597 tmp = load_reg(s, rd);
9bb6558a 11598 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11599 tcg_temp_free_i32(tmp);
99c475ab 11600 }
7d1b0095 11601 tcg_temp_free_i32(addr);
99c475ab
FB
11602 break;
11603
11604 case 9:
11605 /* load/store from stack */
11606 rd = (insn >> 8) & 7;
b0109805 11607 addr = load_reg(s, 13);
99c475ab 11608 val = (insn & 0xff) * 4;
b0109805 11609 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11610
11611 if (insn & (1 << 11)) {
11612 /* load */
c40c8556 11613 tmp = tcg_temp_new_i32();
9bb6558a 11614 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11615 store_reg(s, rd, tmp);
99c475ab
FB
11616 } else {
11617 /* store */
b0109805 11618 tmp = load_reg(s, rd);
9bb6558a 11619 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11620 tcg_temp_free_i32(tmp);
99c475ab 11621 }
7d1b0095 11622 tcg_temp_free_i32(addr);
99c475ab
FB
11623 break;
11624
11625 case 10:
11626 /* add to high reg */
11627 rd = (insn >> 8) & 7;
5899f386
FB
11628 if (insn & (1 << 11)) {
11629 /* SP */
5e3f878a 11630 tmp = load_reg(s, 13);
5899f386
FB
11631 } else {
11632 /* PC. bit 1 is ignored. */
7d1b0095 11633 tmp = tcg_temp_new_i32();
5e3f878a 11634 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11635 }
99c475ab 11636 val = (insn & 0xff) * 4;
5e3f878a
PB
11637 tcg_gen_addi_i32(tmp, tmp, val);
11638 store_reg(s, rd, tmp);
99c475ab
FB
11639 break;
11640
11641 case 11:
11642 /* misc */
11643 op = (insn >> 8) & 0xf;
11644 switch (op) {
11645 case 0:
11646 /* adjust stack pointer */
b26eefb6 11647 tmp = load_reg(s, 13);
99c475ab
FB
11648 val = (insn & 0x7f) * 4;
11649 if (insn & (1 << 7))
6a0d8a1d 11650 val = -(int32_t)val;
b26eefb6
PB
11651 tcg_gen_addi_i32(tmp, tmp, val);
11652 store_reg(s, 13, tmp);
99c475ab
FB
11653 break;
11654
9ee6e8bb
PB
11655 case 2: /* sign/zero extend. */
11656 ARCH(6);
11657 rd = insn & 7;
11658 rm = (insn >> 3) & 7;
b0109805 11659 tmp = load_reg(s, rm);
9ee6e8bb 11660 switch ((insn >> 6) & 3) {
b0109805
PB
11661 case 0: gen_sxth(tmp); break;
11662 case 1: gen_sxtb(tmp); break;
11663 case 2: gen_uxth(tmp); break;
11664 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11665 }
b0109805 11666 store_reg(s, rd, tmp);
9ee6e8bb 11667 break;
99c475ab
FB
11668 case 4: case 5: case 0xc: case 0xd:
11669 /* push/pop */
b0109805 11670 addr = load_reg(s, 13);
5899f386
FB
11671 if (insn & (1 << 8))
11672 offset = 4;
99c475ab 11673 else
5899f386
FB
11674 offset = 0;
11675 for (i = 0; i < 8; i++) {
11676 if (insn & (1 << i))
11677 offset += 4;
11678 }
11679 if ((insn & (1 << 11)) == 0) {
b0109805 11680 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11681 }
99c475ab
FB
11682 for (i = 0; i < 8; i++) {
11683 if (insn & (1 << i)) {
11684 if (insn & (1 << 11)) {
11685 /* pop */
c40c8556 11686 tmp = tcg_temp_new_i32();
12dcc321 11687 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11688 store_reg(s, i, tmp);
99c475ab
FB
11689 } else {
11690 /* push */
b0109805 11691 tmp = load_reg(s, i);
12dcc321 11692 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11693 tcg_temp_free_i32(tmp);
99c475ab 11694 }
5899f386 11695 /* advance to the next address. */
b0109805 11696 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11697 }
11698 }
f764718d 11699 tmp = NULL;
99c475ab
FB
11700 if (insn & (1 << 8)) {
11701 if (insn & (1 << 11)) {
11702 /* pop pc */
c40c8556 11703 tmp = tcg_temp_new_i32();
12dcc321 11704 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11705 /* don't set the pc until the rest of the instruction
11706 has completed */
11707 } else {
11708 /* push lr */
b0109805 11709 tmp = load_reg(s, 14);
12dcc321 11710 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11711 tcg_temp_free_i32(tmp);
99c475ab 11712 }
b0109805 11713 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11714 }
5899f386 11715 if ((insn & (1 << 11)) == 0) {
b0109805 11716 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11717 }
99c475ab 11718 /* write back the new stack pointer */
b0109805 11719 store_reg(s, 13, addr);
99c475ab 11720 /* set the new PC value */
be5e7a76 11721 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11722 store_reg_from_load(s, 15, tmp);
be5e7a76 11723 }
99c475ab
FB
11724 break;
11725
9ee6e8bb
PB
11726 case 1: case 3: case 9: case 11: /* czb */
11727 rm = insn & 7;
d9ba4830 11728 tmp = load_reg(s, rm);
9ee6e8bb
PB
11729 s->condlabel = gen_new_label();
11730 s->condjmp = 1;
11731 if (insn & (1 << 11))
cb63669a 11732 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11733 else
cb63669a 11734 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11735 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11736 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11737 val = (uint32_t)s->pc + 2;
11738 val += offset;
11739 gen_jmp(s, val);
11740 break;
11741
11742 case 15: /* IT, nop-hint. */
11743 if ((insn & 0xf) == 0) {
11744 gen_nop_hint(s, (insn >> 4) & 0xf);
11745 break;
11746 }
11747 /* If Then. */
11748 s->condexec_cond = (insn >> 4) & 0xe;
11749 s->condexec_mask = insn & 0x1f;
11750 /* No actual code generated for this insn, just setup state. */
11751 break;
11752
06c949e6 11753 case 0xe: /* bkpt */
d4a2dc67
PM
11754 {
11755 int imm8 = extract32(insn, 0, 8);
be5e7a76 11756 ARCH(5);
73710361
GB
11757 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11758 default_exception_el(s));
06c949e6 11759 break;
d4a2dc67 11760 }
06c949e6 11761
19a6e31c
PM
11762 case 0xa: /* rev, and hlt */
11763 {
11764 int op1 = extract32(insn, 6, 2);
11765
11766 if (op1 == 2) {
11767 /* HLT */
11768 int imm6 = extract32(insn, 0, 6);
11769
11770 gen_hlt(s, imm6);
11771 break;
11772 }
11773
11774 /* Otherwise this is rev */
9ee6e8bb
PB
11775 ARCH(6);
11776 rn = (insn >> 3) & 0x7;
11777 rd = insn & 0x7;
b0109805 11778 tmp = load_reg(s, rn);
19a6e31c 11779 switch (op1) {
66896cb8 11780 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11781 case 1: gen_rev16(tmp); break;
11782 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11783 default:
11784 g_assert_not_reached();
9ee6e8bb 11785 }
b0109805 11786 store_reg(s, rd, tmp);
9ee6e8bb 11787 break;
19a6e31c 11788 }
9ee6e8bb 11789
d9e028c1
PM
11790 case 6:
11791 switch ((insn >> 5) & 7) {
11792 case 2:
11793 /* setend */
11794 ARCH(6);
9886ecdf
PB
11795 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11796 gen_helper_setend(cpu_env);
dcba3a8d 11797 s->base.is_jmp = DISAS_UPDATE;
d9e028c1 11798 }
9ee6e8bb 11799 break;
d9e028c1
PM
11800 case 3:
11801 /* cps */
11802 ARCH(6);
11803 if (IS_USER(s)) {
11804 break;
8984bd2e 11805 }
b53d8923 11806 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11807 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11808 /* FAULTMASK */
11809 if (insn & 1) {
11810 addr = tcg_const_i32(19);
11811 gen_helper_v7m_msr(cpu_env, addr, tmp);
11812 tcg_temp_free_i32(addr);
11813 }
11814 /* PRIMASK */
11815 if (insn & 2) {
11816 addr = tcg_const_i32(16);
11817 gen_helper_v7m_msr(cpu_env, addr, tmp);
11818 tcg_temp_free_i32(addr);
11819 }
11820 tcg_temp_free_i32(tmp);
11821 gen_lookup_tb(s);
11822 } else {
11823 if (insn & (1 << 4)) {
11824 shift = CPSR_A | CPSR_I | CPSR_F;
11825 } else {
11826 shift = 0;
11827 }
11828 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11829 }
d9e028c1
PM
11830 break;
11831 default:
11832 goto undef;
9ee6e8bb
PB
11833 }
11834 break;
11835
99c475ab
FB
11836 default:
11837 goto undef;
11838 }
11839 break;
11840
11841 case 12:
a7d3970d 11842 {
99c475ab 11843 /* load/store multiple */
f764718d 11844 TCGv_i32 loaded_var = NULL;
99c475ab 11845 rn = (insn >> 8) & 0x7;
b0109805 11846 addr = load_reg(s, rn);
99c475ab
FB
11847 for (i = 0; i < 8; i++) {
11848 if (insn & (1 << i)) {
99c475ab
FB
11849 if (insn & (1 << 11)) {
11850 /* load */
c40c8556 11851 tmp = tcg_temp_new_i32();
12dcc321 11852 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11853 if (i == rn) {
11854 loaded_var = tmp;
11855 } else {
11856 store_reg(s, i, tmp);
11857 }
99c475ab
FB
11858 } else {
11859 /* store */
b0109805 11860 tmp = load_reg(s, i);
12dcc321 11861 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11862 tcg_temp_free_i32(tmp);
99c475ab 11863 }
5899f386 11864 /* advance to the next address */
b0109805 11865 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11866 }
11867 }
b0109805 11868 if ((insn & (1 << rn)) == 0) {
a7d3970d 11869 /* base reg not in list: base register writeback */
b0109805
PB
11870 store_reg(s, rn, addr);
11871 } else {
a7d3970d
PM
11872 /* base reg in list: if load, complete it now */
11873 if (insn & (1 << 11)) {
11874 store_reg(s, rn, loaded_var);
11875 }
7d1b0095 11876 tcg_temp_free_i32(addr);
b0109805 11877 }
99c475ab 11878 break;
a7d3970d 11879 }
99c475ab
FB
11880 case 13:
11881 /* conditional branch or swi */
11882 cond = (insn >> 8) & 0xf;
11883 if (cond == 0xe)
11884 goto undef;
11885
11886 if (cond == 0xf) {
11887 /* swi */
eaed129d 11888 gen_set_pc_im(s, s->pc);
d4a2dc67 11889 s->svc_imm = extract32(insn, 0, 8);
dcba3a8d 11890 s->base.is_jmp = DISAS_SWI;
99c475ab
FB
11891 break;
11892 }
11893 /* generate a conditional jump to next instruction */
e50e6a20 11894 s->condlabel = gen_new_label();
39fb730a 11895 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11896 s->condjmp = 1;
99c475ab
FB
11897
11898 /* jump to the offset */
5899f386 11899 val = (uint32_t)s->pc + 2;
99c475ab 11900 offset = ((int32_t)insn << 24) >> 24;
5899f386 11901 val += offset << 1;
8aaca4c0 11902 gen_jmp(s, val);
99c475ab
FB
11903 break;
11904
11905 case 14:
358bf29e 11906 if (insn & (1 << 11)) {
296e5a0a
PM
11907 /* thumb_insn_is_16bit() ensures we can't get here for
11908 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
11909 * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
11910 */
11911 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11912 ARCH(5);
11913 offset = ((insn & 0x7ff) << 1);
11914 tmp = load_reg(s, 14);
11915 tcg_gen_addi_i32(tmp, tmp, offset);
11916 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
11917
11918 tmp2 = tcg_temp_new_i32();
11919 tcg_gen_movi_i32(tmp2, s->pc | 1);
11920 store_reg(s, 14, tmp2);
11921 gen_bx(s, tmp);
358bf29e
PB
11922 break;
11923 }
9ee6e8bb 11924 /* unconditional branch */
99c475ab
FB
11925 val = (uint32_t)s->pc;
11926 offset = ((int32_t)insn << 21) >> 21;
11927 val += (offset << 1) + 2;
8aaca4c0 11928 gen_jmp(s, val);
99c475ab
FB
11929 break;
11930
11931 case 15:
296e5a0a
PM
11932 /* thumb_insn_is_16bit() ensures we can't get here for
11933 * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
11934 */
11935 assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
11936
11937 if (insn & (1 << 11)) {
11938 /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
11939 offset = ((insn & 0x7ff) << 1) | 1;
11940 tmp = load_reg(s, 14);
11941 tcg_gen_addi_i32(tmp, tmp, offset);
11942
11943 tmp2 = tcg_temp_new_i32();
11944 tcg_gen_movi_i32(tmp2, s->pc | 1);
11945 store_reg(s, 14, tmp2);
11946 gen_bx(s, tmp);
11947 } else {
11948 /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
11949 uint32_t uoffset = ((int32_t)insn << 21) >> 9;
11950
11951 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
11952 }
9ee6e8bb 11953 break;
99c475ab
FB
11954 }
11955 return;
9ee6e8bb 11956illegal_op:
99c475ab 11957undef:
73710361
GB
11958 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11959 default_exception_el(s));
99c475ab
FB
11960}
11961
541ebcd4
PM
11962static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11963{
11964 /* Return true if the insn at dc->pc might cross a page boundary.
11965 * (False positives are OK, false negatives are not.)
5b8d7289
PM
11966 * We know this is a Thumb insn, and our caller ensures we are
11967 * only called if dc->pc is less than 4 bytes from the page
11968 * boundary, so we cross the page if the first 16 bits indicate
11969 * that this is a 32 bit insn.
541ebcd4 11970 */
5b8d7289 11971 uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4 11972
5b8d7289 11973 return !thumb_insn_is_16bit(s, insn);
541ebcd4
PM
11974}
11975
/* Translator hook: set up the DisasContext for a new translation block.
 *
 * Pulls the per-TB state (Thumb mode, condexec bits, MMU index, FP/vector
 * configuration, security state, single-step state) out of the TB flags
 * into @dc, allocates the per-TB TCG temporaries, and returns the
 * possibly-reduced instruction budget for this TB.
 *
 * Ordering note: dc->mmu_idx must be initialized before dc->current_el
 * and dc->v8m_secure, which are both derived from it.
 */
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
                                     CPUState *cs, int max_insns)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = arm_env_get_cpu(env);

    dc->pc = dc->base.pc_first;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
    dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                     regime_is_secure(env, dc->mmu_idx);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    dc->next_page_start =
        (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    /* If architectural single step active, limit to 1. */
    if (is_singlestepping(dc)) {
        max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page.  */
    if (!dc->thumb) {
        int bound = (dc->next_page_start - dc->base.pc_first) / 4;
        max_insns = MIN(max_insns, bound);
    }

    /* Allocate the per-TB TCG temporaries used by the VFP/Neon decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();

    return max_insns;
}
12060
b1476854
LV
12061static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
12062{
12063 DisasContext *dc = container_of(dcbase, DisasContext, base);
12064
12065 /* A note on handling of the condexec (IT) bits:
12066 *
12067 * We want to avoid the overhead of having to write the updated condexec
12068 * bits back to the CPUARMState for every instruction in an IT block. So:
12069 * (1) if the condexec bits are not already zero then we write
12070 * zero back into the CPUARMState now. This avoids complications trying
12071 * to do it at the end of the block. (For example if we don't do this
12072 * it's hard to identify whether we can safely skip writing condexec
12073 * at the end of the TB, which we definitely want to do for the case
12074 * where a TB doesn't do anything with the IT state at all.)
12075 * (2) if we are going to leave the TB then we call gen_set_condexec()
12076 * which will write the correct value into CPUARMState if zero is wrong.
12077 * This is done both for leaving the TB at the end, and for leaving
12078 * it because of an exception we know will happen, which is done in
12079 * gen_exception_insn(). The latter is necessary because we need to
12080 * leave the TB with the PC/IT state just prior to execution of the
12081 * instruction which caused the exception.
12082 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
12083 * then the CPUARMState will be wrong and we need to reset it.
12084 * This is handled in the same way as restoration of the
12085 * PC in these situations; we save the value of the condexec bits
12086 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
12087 * then uses this to restore them after an exception.
12088 *
12089 * Note that there are no instructions which can read the condexec
12090 * bits, and none which can write non-static values to them, so
12091 * we don't need to care about whether CPUARMState is correct in the
12092 * middle of a TB.
12093 */
12094
12095 /* Reset the conditional execution bits immediately. This avoids
12096 complications trying to do it at the end of the block. */
12097 if (dc->condexec_mask || dc->condexec_cond) {
12098 TCGv_i32 tmp = tcg_temp_new_i32();
12099 tcg_gen_movi_i32(tmp, 0);
12100 store_cpu_field(tmp, condexec_bits);
12101 }
23169224 12102 tcg_clear_temp_count();
b1476854
LV
12103}
12104
f62bd897
LV
12105static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
12106{
12107 DisasContext *dc = container_of(dcbase, DisasContext, base);
12108
f62bd897
LV
12109 tcg_gen_insn_start(dc->pc,
12110 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
12111 0);
15fa08f8 12112 dc->insn_start = tcg_last_op();
f62bd897
LV
12113}
12114
a68956ad
LV
12115static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
12116 const CPUBreakpoint *bp)
12117{
12118 DisasContext *dc = container_of(dcbase, DisasContext, base);
12119
12120 if (bp->flags & BP_CPU) {
12121 gen_set_condexec(dc);
12122 gen_set_pc_im(dc, dc->pc);
12123 gen_helper_check_breakpoints(cpu_env);
12124 /* End the TB early; it's likely not going to be executed */
12125 dc->base.is_jmp = DISAS_TOO_MANY;
12126 } else {
12127 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
12128 /* The address covered by the breakpoint must be
12129 included in [tb->pc, tb->pc + tb->size) in order
12130 to for it to be properly cleared -- thus we
12131 increment the PC here so that the logic setting
12132 tb->size below does the right thing. */
12133 /* TODO: Advance PC by correct instruction length to
12134 * avoid disassembler error messages */
12135 dc->pc += 2;
12136 dc->base.is_jmp = DISAS_NORETURN;
12137 }
12138
12139 return true;
12140}
12141
722ef0a5 12142static bool arm_pre_translate_insn(DisasContext *dc)
13189a90 12143{
13189a90
LV
12144#ifdef CONFIG_USER_ONLY
12145 /* Intercept jump to the magic kernel page. */
12146 if (dc->pc >= 0xffff0000) {
12147 /* We always get here via a jump, so know we are not in a
12148 conditional execution block. */
12149 gen_exception_internal(EXCP_KERNEL_TRAP);
12150 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12151 return true;
13189a90
LV
12152 }
12153#endif
12154
12155 if (dc->ss_active && !dc->pstate_ss) {
12156 /* Singlestep state is Active-pending.
12157 * If we're in this state at the start of a TB then either
12158 * a) we just took an exception to an EL which is being debugged
12159 * and this is the first insn in the exception handler
12160 * b) debug exceptions were masked and we just unmasked them
12161 * without changing EL (eg by clearing PSTATE.D)
12162 * In either case we're going to take a swstep exception in the
12163 * "did not step an insn" case, and so the syndrome ISV and EX
12164 * bits should be zero.
12165 */
12166 assert(dc->base.num_insns == 1);
12167 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
12168 default_exception_el(dc));
12169 dc->base.is_jmp = DISAS_NORETURN;
722ef0a5 12170 return true;
13189a90
LV
12171 }
12172
722ef0a5
RH
12173 return false;
12174}
13189a90 12175
d0264d86 12176static void arm_post_translate_insn(DisasContext *dc)
722ef0a5 12177{
13189a90
LV
12178 if (dc->condjmp && !dc->base.is_jmp) {
12179 gen_set_label(dc->condlabel);
12180 dc->condjmp = 0;
12181 }
13189a90 12182 dc->base.pc_next = dc->pc;
23169224 12183 translator_loop_temp_check(&dc->base);
13189a90
LV
12184}
12185
722ef0a5
RH
12186static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
12187{
12188 DisasContext *dc = container_of(dcbase, DisasContext, base);
12189 CPUARMState *env = cpu->env_ptr;
12190 unsigned int insn;
12191
12192 if (arm_pre_translate_insn(dc)) {
12193 return;
12194 }
12195
12196 insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
58803318 12197 dc->insn = insn;
722ef0a5
RH
12198 dc->pc += 4;
12199 disas_arm_insn(dc, insn);
12200
d0264d86
RH
12201 arm_post_translate_insn(dc);
12202
12203 /* ARM is a fixed-length ISA. We performed the cross-page check
12204 in init_disas_context by adjusting max_insns. */
722ef0a5
RH
12205}
12206
dcf14dfb
PM
12207static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
12208{
12209 /* Return true if this Thumb insn is always unconditional,
12210 * even inside an IT block. This is true of only a very few
12211 * instructions: BKPT, HLT, and SG.
12212 *
12213 * A larger class of instructions are UNPREDICTABLE if used
12214 * inside an IT block; we do not need to detect those here, because
12215 * what we do by default (perform the cc check and update the IT
12216 * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
12217 * choice for those situations.
12218 *
12219 * insn is either a 16-bit or a 32-bit instruction; the two are
12220 * distinguishable because for the 16-bit case the top 16 bits
12221 * are zeroes, and that isn't a valid 32-bit encoding.
12222 */
12223 if ((insn & 0xffffff00) == 0xbe00) {
12224 /* BKPT */
12225 return true;
12226 }
12227
12228 if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
12229 !arm_dc_feature(s, ARM_FEATURE_M)) {
12230 /* HLT: v8A only. This is unconditional even when it is going to
12231 * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
12232 * For v7 cores this was a plain old undefined encoding and so
12233 * honours its cc check. (We might be using the encoding as
12234 * a semihosting trap, but we don't change the cc check behaviour
12235 * on that account, because a debugger connected to a real v7A
12236 * core and emulating semihosting traps by catching the UNDEF
12237 * exception would also only see cases where the cc check passed.
12238 * No guest code should be trying to do a HLT semihosting trap
12239 * in an IT block anyway.
12240 */
12241 return true;
12242 }
12243
12244 if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
12245 arm_dc_feature(s, ARM_FEATURE_M)) {
12246 /* SG: v8M only */
12247 return true;
12248 }
12249
12250 return false;
12251}
12252
722ef0a5
RH
/* Translate one Thumb instruction: fetch (one or two halfwords), emit the
 * IT-block condition check if needed, decode, then advance the IT state
 * machine and apply the variable-length page-crossing stop condition.
 */
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    /* Fetch the first halfword; a 32-bit insn needs a second fetch,
     * after which both halves are packed into one word (first half high).
     */
    insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, insn);
    dc->pc += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->pc += 2;
    }
    dc->insn = insn;

    /* Inside an IT block, emit a conditional skip around this insn,
     * unless the insn is one of the few that ignore the IT condition.
     */
    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        if (cond != 0x0e) { /* Skip conditional when condition is AL. */
            dc->condlabel = gen_new_label();
            arm_gen_test_cc(cond ^ 1, dc->condlabel);
            dc->condjmp = 1;
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition: shift the mask left by one,
     * pulling the next condition LSB from the mask; a mask of zero means
     * the IT block has ended.
     */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA. Stop translation when the next insn
     * will touch a new page. This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->pc >= dc->next_page_start
            || (dc->pc >= dc->next_page_start - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
12323
/* Emit the end-of-TB code: write back condexec state, then generate the
 * appropriate exit sequence for dc->base.is_jmp, with separate paths for
 * exception-return branches, architectural single-stepping, and the
 * normal case; finally emit the "condition failed" path for a pending
 * conditional branch/trap insn.
 */
static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            /* Advance the single-step state machine before raising
             * the exception for the completed step.
             */
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch(dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            /* Normal exit: chain directly to the TB at dc->pc. */
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_JUMP:
            /* PC already updated; look up the next TB by pointer. */
            gen_goto_ptr();
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            /* The helper argument is the length of the WFI insn (2 or 4),
             * used to back the PC up if the WFI needs to be restarted.
             */
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->pc);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->pc);
        }
    }

    /* Functions above can change dc->pc, so re-align db->pc_next */
    dc->base.pc_next = dc->pc;
}
12446
4013f7fc
LV
12447static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
12448{
12449 DisasContext *dc = container_of(dcbase, DisasContext, base);
12450
12451 qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
1d48474d 12452 log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
4013f7fc
LV
12453}
12454
23169224
LV
/* Translator hooks for AArch32 ARM (A32) mode. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12464
722ef0a5
RH
/* Translator hooks for Thumb (T32) mode; shares every hook with the
 * ARM ops except the per-insn translation step.
 */
static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
12474
70d3c035 12475/* generate intermediate code for basic block 'tb'. */
23169224 12476void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
70d3c035 12477{
23169224
LV
12478 DisasContext dc;
12479 const TranslatorOps *ops = &arm_translator_ops;
70d3c035 12480
722ef0a5
RH
12481 if (ARM_TBFLAG_THUMB(tb->flags)) {
12482 ops = &thumb_translator_ops;
12483 }
23169224 12484#ifdef TARGET_AARCH64
70d3c035 12485 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
23169224 12486 ops = &aarch64_translator_ops;
2c0262af
FB
12487 }
12488#endif
23169224
LV
12489
12490 translator_loop(ops, &dc.base, cpu, tb);
2c0262af
FB
12491}
12492
/* Textual names for the AArch32 CPSR.M mode field, indexed by mode<3:0>;
 * unassigned encodings print as "???". The array of pointers is itself
 * const: this lookup table is never modified, so make that explicit and
 * let it live in read-only storage.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 12497
878096ee
AF
/* Dump the CPU register state (for the monitor / -d cpu logging):
 * core registers, the PSR/XPSR line, and optionally the VFP registers
 * when CPU_DUMP_FPU is set in flags. AArch64 state is delegated to
 * aarch64_cpu_dump_state().
 */
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
        return;
    }

    /* R0..R15, four per output line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: print XPSR plus security state and privilege mode. */
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            /* CONTROL.nPRIV is banked by security state. */
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        cpu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                    xpsr,
                    xpsr & XPSR_N ? 'N' : '-',
                    xpsr & XPSR_Z ? 'Z' : '-',
                    xpsr & XPSR_C ? 'C' : '-',
                    xpsr & XPSR_V ? 'V' : '-',
                    xpsr & XPSR_T ? 'T' : 'A',
                    ns_status,
                    mode);
    } else {
        /* A/R-profile: print CPSR with NZCV flags, Thumb/ARM state,
         * security state (when EL3 exists and we're not in Monitor mode),
         * the mode name, and the 32/26-bit mode indicator.
         */
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                    psr,
                    psr & CPSR_N ? 'N' : '-',
                    psr & CPSR_Z ? 'Z' : '-',
                    psr & CPSR_C ? 'C' : '-',
                    psr & CPSR_V ? 'V' : '-',
                    psr & CPSR_T ? 'T' : 'A',
                    ns_status,
                    cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        /* 16 double registers for plain VFP, 32 for VFP3 and later;
         * each is also shown as its two single-precision halves.
         */
        int numvfpregs = 0;
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            numvfpregs += 16;
        }
        if (arm_feature(env, ARM_FEATURE_VFP3)) {
            numvfpregs += 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = float64_val(env->vfp.regs[i]);
            cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                        i * 2, (uint32_t)v,
                        i * 2 + 1, (uint32_t)(v >> 32),
                        i, v);
        }
        cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
    }
}
a6b025d3 12584
bad729e2
RH
12585void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12586 target_ulong *data)
d2856f1a 12587{
3926cc84 12588 if (is_a64(env)) {
bad729e2 12589 env->pc = data[0];
40f860cd 12590 env->condexec_bits = 0;
aaa1f954 12591 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12592 } else {
bad729e2
RH
12593 env->regs[15] = data[0];
12594 env->condexec_bits = data[1];
aaa1f954 12595 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12596 }
d2856f1a 12597}