]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20170711' into...
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
2b51668f
PM
40#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
41#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
be5e7a76 42/* currently all emulated v5 cores are also v5TE, so don't bother */
2b51668f 43#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
9ee6e8bb 44#define ENABLE_ARCH_5J 0
2b51668f
PM
45#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
46#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
47#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
48#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
49#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
b5ff1b31 50
86753403 51#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 52
f570c61e 53#include "translate.h"
e12ce78d 54
b5ff1b31
FB
55#if defined(CONFIG_USER_ONLY)
56#define IS_USER(s) 1
57#else
58#define IS_USER(s) (s->user)
59#endif
60
/* TCG globals caching the guest CPU state; cpu_env is also used by the
 * AArch64 translator (see a64_translate_init()).
 */
TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME: These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

/* Names for cpu_R[]; index 15 is the PC.  */
static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
b26eefb6
PB
/* initialize TCG globals.  Called once at translator start-up to map the
 * fields of CPUARMState onto the TCG global variables above.
 */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    /* Also set up the globals used by the AArch64 translator.  */
    a64_translate_init();
}
104
9bb6558a
PM
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;      /* access size */
    bool sse = memop & MO_SIGN;     /* sign-extended load */
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
147
static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S12NSE0);
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_S2NS:
    default:
        /* Stage-2 index is never a valid translation regime here.  */
        g_assert_not_reached();
    }
}
173
/* Load a 32-bit value from CPUARMState at the given byte offset into a
 * fresh temporary; the caller owns (and must free) the returned temp.
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 182
/* Store var into CPUARMState at the given byte offset.  var must be a
 * temporary and is marked dead (freed) here.
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 191
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}
207
/* Create a new temporary and set it to the value of a CPU register.
 * The caller owns (and must free) the returned temporary.
 */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
215
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  Writing r15 ends the TB (interworking is NOT
   performed here; see store_reg_bx/store_reg_from_load for that). */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
232
/* Value extensions.  In-place zero/sign extension of the low byte or
 * halfword of a 32-bit temporary.
 */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual (per-halfword) byte extensions, done via helpers.  */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 241
b26eefb6 242
/* Write the masked bits of var into the CPSR via the cpsr_write helper.  */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
251
/* Raise a QEMU-internal exception (e.g. EXCP_DEBUG); must not be used
 * for architectural exceptions, which need a syndrome (see gen_exception).
 */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}
260
/* Raise an architectural exception excp with the given syndrome value,
 * taken to exception level target_el.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
274
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}
285
static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
302
static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}
315
static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->singlestep_enabled || s->ss_active;
}
326
/* Dual 16x16->32 signed multiply: on return, a holds the product of the
 * two low halfwords and b holds the product of the two high halfwords.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    /* Arithmetic shift right keeps the sign of the high halves.  */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
341
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
353
/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
361
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
375
/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
389
/* 32x32->64 unsigned multiply. Marks inputs as dead.
 * Returns a fresh 64-bit temp the caller must free.
 */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
408
/* 32x32->64 signed multiply. Marks inputs as dead.
 * Returns a fresh 64-bit temp the caller must free.
 */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
426
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
436
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   The XOR trick cancels any carry that would propagate from bit 15
   into the upper halfword:
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
456
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}
462
/* Set N and Z flags from var.  (NF holds the sign bit, ZF is zero iff
 * the result is zero — both are just copies of the value.)
 */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
469
/* T0 += T1 + CF.  (Flags are NOT updated; see gen_adc_CC for that.)  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}
476
/* dest = T0 + T1 + CF.  (Flags are NOT updated.)  */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}
483
/* dest = T0 - T1 + CF - 1.  (ARM subtract-with-carry; flags NOT updated.)  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
491
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 gives us the 33-bit result: NF = low 32 bits, CF = carry out.  */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V is set iff the operands have the same sign and the result's
     * sign differs: (res ^ t0) & ~(t0 ^ t1), taken at bit 31.
     */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
505
/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops accumulate the carry without needing
         * a 64-bit intermediate.
         */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: widen to 64 bits and split result into NF:CF.  */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* Overflow: same-sign operands producing an opposite-sign result.  */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
533
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM C flag for subtraction is "no borrow", i.e. t0 >= t1 unsigned.  */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    /* Overflow: operands of differing sign and result sign != t0 sign.  */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}
548
/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags.
 * (Subtract-with-carry expressed as add-with-carry of the complement.)
 */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
557
/* Generate gen_shl/gen_shr implementing ARM shift-by-register semantics:
 * only the low byte of t1 is used, and shift amounts >= 32 yield 0
 * (selected via the movcond before masking the amount to 5 bits).
 */
#define GEN_SHIFT(name)                                       \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{                                                             \
    TCGv_i32 tmp1, tmp2, tmp3;                                \
    tmp1 = tcg_temp_new_i32();                                \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                         \
    tmp2 = tcg_const_i32(0);                                  \
    tmp3 = tcg_const_i32(0x1f);                               \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3);                                  \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                       \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                   \
    tcg_temp_free_i32(tmp2);                                  \
    tcg_temp_free_i32(tmp1);                                  \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT
576
/* ARM arithmetic-shift-right by register: only the low byte of t1 is
 * used, and amounts >= 32 behave like a shift by 31 (sign fill).
 */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    /* Clamp the shift amount to 31.  */
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
588
/* dest = |src| via movcond: pick src when src > 0, else its negation.  */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 598
/* Set CF to bit 'shift' of var — the last bit shifted out by an
 * immediate shift (shift == 0 means bit 0; shift == 31 needs no mask
 * since the shri already leaves a single bit).
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 610
/* Shift by immediate.  Includes special handling for shift == 0,
 * which encodes LSR/ASR #32 and RRX per the ARM ARM.  If flags is
 * set, CF is updated with the last bit shifted out.
 */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* Encoding for LSR #32: result 0, CF = old bit 31.  */
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        /* shift == 0 encodes ASR #32; a sari by 31 gives the same result.  */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
          shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* shift == 0 encodes RRX: rotate right through carry.  */
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
660
/* Shift var by a register amount.  When flags is set the shift is done
 * via helpers that also update CF; otherwise the open-coded variants
 * above implement the ARM >=32 shift-amount semantics.  shift is
 * marked as dead.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
688
/* Dispatch the ARM-encoding parallel add/subtract ops (SADD16 etc.).
 * op1 selects the saturation/halving/GE-setting family, op2 the
 * add/sub/crossed operation.  The signed and unsigned GE-flag variants
 * take a pointer to env->GE as an extra helper argument.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
734
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 * Same operations as gen_arm_parallel_addsub, but here op1 selects the
 * add/sub operation and op2 the family.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
781
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 * Fills in cmp with a TCG condition and the value to compare against
 * zero; value_global records whether the value is a TCG global (and so
 * must not be freed — see arm_free_cc).
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the negation of the preceding even one.  */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
871
/* Release the temporary allocated by arm_test_cc, if any.  */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}
878
/* Branch to label if the prepared condition holds.  */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}
883
/* Convenience wrapper: branch to label if condition code cc holds.  */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 891
/* Indexed by data-processing opcode: 1 if the op sets NZ via
 * gen_logic_CC rather than an arithmetic flag computation.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 910
/* Sync the Thumb IT-block state (condexec bits) back into env.  */
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
920
/* Set the PC register to an immediate value.  */
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
925
/* Set PC and Thumb state from an immediate address.  Bit 0 of the
 * address selects the Thumb state; env->thumb is only written when it
 * actually changes.
 */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
940
/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
949
/* Set PC and Thumb state from var.  var is marked as dead.
 * For M-profile CPUs, include logic to detect exception-return
 * branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
 * and BX reg, and no others, and happens only for code in Handler mode.
 */
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
{
    /* Generate the same code here as for a simple bx, but flag via
     * s->is_jmp that we need to do the rest of the work later.
     */
    gen_bx(s, var);
    if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
        s->is_jmp = DISAS_BX_EXCRET;
    }
}
965
static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
994
21aeb343
JR
995/* Variant of store_reg which uses branch&exchange logic when storing
996 to r15 in ARM architecture v7 and above. The source must be a temporary
997 and will be marked as dead. */
7dcc1f89 998static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
999{
1000 if (reg == 15 && ENABLE_ARCH_7) {
1001 gen_bx(s, var);
1002 } else {
1003 store_reg(s, reg, var);
1004 }
1005}
1006
be5e7a76
DES
1007/* Variant of store_reg which uses branch&exchange logic when storing
1008 * to r15 in ARM architecture v5T and above. This is used for storing
1009 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
1010 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 1011static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
1012{
1013 if (reg == 15 && ENABLE_ARCH_5) {
3bb8a96f 1014 gen_bx_excret(s, var);
be5e7a76
DES
1015 } else {
1016 store_reg(s, reg, var);
1017 }
1018}
1019
/* IS_USER_ONLY: compile-time true when building the user-mode emulator
 * (no system/MMU emulation is present).
 */
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

/* Extend the 32-bit AArch32 address to target width, adjusting the
 * byte lane for system-mode BE32 accesses narrower than a word.
 */
static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        /* XOR-ing the low address bits selects the big-endian byte
         * lane within the word for sub-word accesses.
         */
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
1045
7f5616f5
RH
1046static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1047 int index, TCGMemOp opc)
08307563 1048{
7f5616f5
RH
1049 TCGv addr = gen_aa32_addr(s, a32, opc);
1050 tcg_gen_qemu_ld_i32(val, addr, index, opc);
1051 tcg_temp_free(addr);
08307563
PM
1052}
1053
7f5616f5
RH
1054static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
1055 int index, TCGMemOp opc)
1056{
1057 TCGv addr = gen_aa32_addr(s, a32, opc);
1058 tcg_gen_qemu_st_i32(val, addr, index, opc);
1059 tcg_temp_free(addr);
1060}
08307563 1061
/* DO_GEN_LD: define gen_aa32_ld<SUFF>() plus a gen_aa32_ld<SUFF>_iss()
 * variant which additionally records ISS syndrome information for a
 * possible syndrome-valid data abort.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}

/* DO_GEN_ST: store counterpart of DO_GEN_LD; the _iss variant marks
 * the access as a write in the syndrome information.
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1091
7f5616f5 1092static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1093{
e334bd31
PB
1094 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1095 if (!IS_USER_ONLY && s->sctlr_b) {
1096 tcg_gen_rotri_i64(val, val, 32);
1097 }
08307563
PM
1098}
1099
/* 64-bit guest load: emit the access, then word-swap the result for
 * system-mode BE32 if required.
 */
static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}

/* 64-bit guest store: for system-mode BE32, store a word-swapped copy
 * so that @val itself is left unmodified.
 */
static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}
08307563 1137
/* Instantiate the byte/halfword/word load and store generators
 * (and their *_iss variants) for each access size and signedness.
 */
DO_GEN_LD(8s, MO_SB)
DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16s, MO_SW)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
08307563 1146
/* Generate code for HVC #imm16 (hypervisor call). */
static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_HVC;
}

/* Generate code for SMC (secure monitor call). */
static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc - 4);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->pc);
    s->is_jmp = DISAS_SMC;
}
1179
/* Raise a QEMU-internal exception for the insn at PC - @offset,
 * after syncing the IT-state bits and PC into the CPU state.
 */
static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception_internal(excp);
    s->is_jmp = DISAS_EXC;
}

/* Raise an architectural exception with syndrome @syn, targeting
 * exception level @target_el, for the insn at PC - @offset.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - offset);
    gen_exception(excp, syn, target_el);
    s->is_jmp = DISAS_EXC;
}
1196
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    /* Bit 0 of the stored PC is always clear; the Thumb state is kept
     * in env->thumb, not in r15.
     */
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_EXIT;
}
1203
/* Generate code for HLT #imm (halting debug / semihosting trap). */
static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
        return;
    }

    /* Not a semihosting trap: UNDEF. */
    gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}
1230
b0109805 1231static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1232 TCGv_i32 var)
2c0262af 1233{
1e8d4eec 1234 int val, rm, shift, shiftop;
39d5492a 1235 TCGv_i32 offset;
2c0262af
FB
1236
1237 if (!(insn & (1 << 25))) {
1238 /* immediate */
1239 val = insn & 0xfff;
1240 if (!(insn & (1 << 23)))
1241 val = -val;
537730b9 1242 if (val != 0)
b0109805 1243 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1244 } else {
1245 /* shift/register */
1246 rm = (insn) & 0xf;
1247 shift = (insn >> 7) & 0x1f;
1e8d4eec 1248 shiftop = (insn >> 5) & 3;
b26eefb6 1249 offset = load_reg(s, rm);
9a119ff6 1250 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1251 if (!(insn & (1 << 23)))
b0109805 1252 tcg_gen_sub_i32(var, var, offset);
2c0262af 1253 else
b0109805 1254 tcg_gen_add_i32(var, var, offset);
7d1b0095 1255 tcg_temp_free_i32(offset);
2c0262af
FB
1256 }
1257}
1258
191f9a93 1259static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1260 int extra, TCGv_i32 var)
2c0262af
FB
1261{
1262 int val, rm;
39d5492a 1263 TCGv_i32 offset;
3b46e624 1264
2c0262af
FB
1265 if (insn & (1 << 22)) {
1266 /* immediate */
1267 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1268 if (!(insn & (1 << 23)))
1269 val = -val;
18acad92 1270 val += extra;
537730b9 1271 if (val != 0)
b0109805 1272 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1273 } else {
1274 /* register */
191f9a93 1275 if (extra)
b0109805 1276 tcg_gen_addi_i32(var, var, extra);
2c0262af 1277 rm = (insn) & 0xf;
b26eefb6 1278 offset = load_reg(s, rm);
2c0262af 1279 if (!(insn & (1 << 23)))
b0109805 1280 tcg_gen_sub_i32(var, var, offset);
2c0262af 1281 else
b0109805 1282 tcg_gen_add_i32(var, var, offset);
7d1b0095 1283 tcg_temp_free_i32(offset);
2c0262af
FB
1284 }
1285}
1286
5aaebd13
PM
1287static TCGv_ptr get_fpstatus_ptr(int neon)
1288{
1289 TCGv_ptr statusptr = tcg_temp_new_ptr();
1290 int offset;
1291 if (neon) {
0ecb72a5 1292 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1293 } else {
0ecb72a5 1294 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1295 }
1296 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1297 return statusptr;
1298}
1299
/* VFP_OP2: define gen_vfp_<name>(), emitting F0 = F0 <op> F1 using
 * the VFP fp_status.
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1318
/* Like gen_vfp_mul() but put result in F1. */
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

/* Like gen_vfp_neg() but put result in F1. */
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1340
4373f3ce
PB
1341static inline void gen_vfp_abs(int dp)
1342{
1343 if (dp)
1344 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1345 else
1346 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1347}
1348
1349static inline void gen_vfp_neg(int dp)
1350{
1351 if (dp)
1352 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1353 else
1354 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1355}
1356
1357static inline void gen_vfp_sqrt(int dp)
1358{
1359 if (dp)
1360 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1361 else
1362 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1363}
1364
1365static inline void gen_vfp_cmp(int dp)
1366{
1367 if (dp)
1368 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1369 else
1370 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1371}
1372
1373static inline void gen_vfp_cmpe(int dp)
1374{
1375 if (dp)
1376 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1377 else
1378 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1379}
1380
1381static inline void gen_vfp_F1_ld0(int dp)
1382{
1383 if (dp)
5b340b51 1384 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1385 else
5b340b51 1386 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1387}
1388
/* VFP_GEN_ITOF: integer-to-float conversions, F0 = name(F0).
 * @neon selects the Neon "standard FPSCR" float_status.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

/* VFP_GEN_FTOI: float-to-integer conversions; the integer result is
 * always delivered in cpu_F0s.
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

/* VFP_GEN_FIX: fixed-point conversions with a shift (fraction bits)
 * operand; @round selects the helper's rounding-mode suffix.
 */
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
                                        statusptr); \
    } else { \
        gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
                                        statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
9ee6e8bb 1447
/* Load the F0 scratch register (double if @dp) from guest memory at @addr. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
    }
}

/* Store the F0 scratch register (double if @dp) to guest memory at @addr. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
    } else {
        gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
    }
}
1465
8e96005d
FB
1466static inline long
1467vfp_reg_offset (int dp, int reg)
1468{
1469 if (dp)
1470 return offsetof(CPUARMState, vfp.regs[reg]);
1471 else if (reg & 1) {
1472 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1473 + offsetof(CPU_DoubleU, l.upper);
1474 } else {
1475 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1476 + offsetof(CPU_DoubleU, l.lower);
1477 }
1478}
9ee6e8bb
PB
1479
1480/* Return the offset of a 32-bit piece of a NEON register.
1481 zero is the least significant end of the register. */
1482static inline long
1483neon_reg_offset (int reg, int n)
1484{
1485 int sreg;
1486 sreg = reg * 2 + n;
1487 return vfp_reg_offset(0, sreg);
1488}
1489
39d5492a 1490static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1491{
39d5492a 1492 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1493 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1494 return tmp;
1495}
1496
/* Store @var to the 32-bit piece (reg, pass) of a NEON register; @var
 * is freed (marked dead).
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

/* Load/store a whole 64-bit (D) register. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* VFP "float" accessors are plain 32/64-bit integer TCG loads/stores. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1517
b7bcbe95
FB
1518static inline void gen_mov_F0_vreg(int dp, int reg)
1519{
1520 if (dp)
4373f3ce 1521 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1522 else
4373f3ce 1523 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1524}
1525
1526static inline void gen_mov_F1_vreg(int dp, int reg)
1527{
1528 if (dp)
4373f3ce 1529 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1530 else
4373f3ce 1531 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1532}
1533
1534static inline void gen_mov_vreg_F0(int dp, int reg)
1535{
1536 if (dp)
4373f3ce 1537 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1538 else
4373f3ce 1539 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1540}
1541
/* Bit 20 of a coprocessor insn word: set for the read/load direction. */
#define ARM_CP_RW_BIT (1 << 20)

/* Copy a 64-bit iwMMXt data register to/from a TCG temp. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

/* Load an iwMMXt control register into a freshly allocated temp. */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

/* Store @var into an iwMMXt control register; @var is freed. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

/* Move between the M0 working register and wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

/* 64-bit logical operations: M0 = M0 <op> wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1594
/* IWMMXT_OP: define gen_op_iwmmxt_<name>_M0_wRn(), emitting
 * M0 = helper(M0, wRn).
 */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but for helpers which also take cpu_env. */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand, env-taking form: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1619
1620IWMMXT_OP(maddsq)
1621IWMMXT_OP(madduq)
1622IWMMXT_OP(sadb)
1623IWMMXT_OP(sadw)
1624IWMMXT_OP(mulslw)
1625IWMMXT_OP(mulshw)
1626IWMMXT_OP(mululw)
1627IWMMXT_OP(muluhw)
1628IWMMXT_OP(macsw)
1629IWMMXT_OP(macuw)
1630
477955bd
PM
1631IWMMXT_OP_ENV_SIZE(unpackl)
1632IWMMXT_OP_ENV_SIZE(unpackh)
1633
1634IWMMXT_OP_ENV1(unpacklub)
1635IWMMXT_OP_ENV1(unpackluw)
1636IWMMXT_OP_ENV1(unpacklul)
1637IWMMXT_OP_ENV1(unpackhub)
1638IWMMXT_OP_ENV1(unpackhuw)
1639IWMMXT_OP_ENV1(unpackhul)
1640IWMMXT_OP_ENV1(unpacklsb)
1641IWMMXT_OP_ENV1(unpacklsw)
1642IWMMXT_OP_ENV1(unpacklsl)
1643IWMMXT_OP_ENV1(unpackhsb)
1644IWMMXT_OP_ENV1(unpackhsw)
1645IWMMXT_OP_ENV1(unpackhsl)
1646
1647IWMMXT_OP_ENV_SIZE(cmpeq)
1648IWMMXT_OP_ENV_SIZE(cmpgtu)
1649IWMMXT_OP_ENV_SIZE(cmpgts)
1650
1651IWMMXT_OP_ENV_SIZE(mins)
1652IWMMXT_OP_ENV_SIZE(minu)
1653IWMMXT_OP_ENV_SIZE(maxs)
1654IWMMXT_OP_ENV_SIZE(maxu)
1655
1656IWMMXT_OP_ENV_SIZE(subn)
1657IWMMXT_OP_ENV_SIZE(addn)
1658IWMMXT_OP_ENV_SIZE(subu)
1659IWMMXT_OP_ENV_SIZE(addu)
1660IWMMXT_OP_ENV_SIZE(subs)
1661IWMMXT_OP_ENV_SIZE(adds)
1662
1663IWMMXT_OP_ENV(avgb0)
1664IWMMXT_OP_ENV(avgb1)
1665IWMMXT_OP_ENV(avgw0)
1666IWMMXT_OP_ENV(avgw1)
e677137d 1667
477955bd
PM
1668IWMMXT_OP_ENV(packuw)
1669IWMMXT_OP_ENV(packul)
1670IWMMXT_OP_ENV(packuq)
1671IWMMXT_OP_ENV(packsw)
1672IWMMXT_OP_ENV(packsl)
1673IWMMXT_OP_ENV(packsq)
e677137d 1674
/* Set the MUP bit (bit 1) in wCon: iwMMXt data registers updated. */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Set the CUP bit (bit 0) in wCon: iwMMXt control registers updated. */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

/* Update the N/Z flags in wCASF from the 64-bit value in M0. */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn. */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1704
/* Compute the effective address for an iwMMXt load/store into @dest
 * and apply base-register writeback per the addressing mode bits.
 * Returns 1 for an invalid encoding, 0 on success.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    /* 8-bit immediate, scaled by 1 or 4 depending on bit 8. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);      /* writeback */
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;                       /* P=0, W=0, U=0: invalid */
    return 0;
}
1739
/* Fetch the shift amount for an iwMMXt shift insn into @dest: either a
 * wCGRn control register (bit 8 set; only wCGR0..wCGR3 are legal) or
 * the low 32 bits of data register wRd, masked with @mask.
 * Returns 1 for an invalid encoding, 0 on success.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1761
a1c7273b 1762/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1763 (ie. an undefined instruction). */
7dcc1f89 1764static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1765{
1766 int rd, wrd;
1767 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1768 TCGv_i32 addr;
1769 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1770
1771 if ((insn & 0x0e000e00) == 0x0c000000) {
1772 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1773 wrd = insn & 0xf;
1774 rdlo = (insn >> 12) & 0xf;
1775 rdhi = (insn >> 16) & 0xf;
1776 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1777 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1778 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1779 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1780 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1781 } else { /* TMCRR */
da6b5335
FN
1782 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1783 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1784 gen_op_iwmmxt_set_mup();
1785 }
1786 return 0;
1787 }
1788
1789 wrd = (insn >> 12) & 0xf;
7d1b0095 1790 addr = tcg_temp_new_i32();
da6b5335 1791 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1792 tcg_temp_free_i32(addr);
18c9b560 1793 return 1;
da6b5335 1794 }
18c9b560
AZ
1795 if (insn & ARM_CP_RW_BIT) {
1796 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1797 tmp = tcg_temp_new_i32();
12dcc321 1798 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1799 iwmmxt_store_creg(wrd, tmp);
18c9b560 1800 } else {
e677137d
PB
1801 i = 1;
1802 if (insn & (1 << 8)) {
1803 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1804 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1805 i = 0;
1806 } else { /* WLDRW wRd */
29531141 1807 tmp = tcg_temp_new_i32();
12dcc321 1808 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1809 }
1810 } else {
29531141 1811 tmp = tcg_temp_new_i32();
e677137d 1812 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1813 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1814 } else { /* WLDRB */
12dcc321 1815 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1816 }
1817 }
1818 if (i) {
1819 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1820 tcg_temp_free_i32(tmp);
e677137d 1821 }
18c9b560
AZ
1822 gen_op_iwmmxt_movq_wRn_M0(wrd);
1823 }
1824 } else {
1825 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1826 tmp = iwmmxt_load_creg(wrd);
12dcc321 1827 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1828 } else {
1829 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1830 tmp = tcg_temp_new_i32();
e677137d
PB
1831 if (insn & (1 << 8)) {
1832 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1833 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1834 } else { /* WSTRW wRd */
ecc7b3aa 1835 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1836 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1837 }
1838 } else {
1839 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1840 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1841 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1842 } else { /* WSTRB */
ecc7b3aa 1843 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1844 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1845 }
1846 }
18c9b560 1847 }
29531141 1848 tcg_temp_free_i32(tmp);
18c9b560 1849 }
7d1b0095 1850 tcg_temp_free_i32(addr);
18c9b560
AZ
1851 return 0;
1852 }
1853
1854 if ((insn & 0x0f000000) != 0x0e000000)
1855 return 1;
1856
1857 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1858 case 0x000: /* WOR */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 0) & 0xf;
1861 rd1 = (insn >> 16) & 0xf;
1862 gen_op_iwmmxt_movq_M0_wRn(rd0);
1863 gen_op_iwmmxt_orq_M0_wRn(rd1);
1864 gen_op_iwmmxt_setpsr_nz();
1865 gen_op_iwmmxt_movq_wRn_M0(wrd);
1866 gen_op_iwmmxt_set_mup();
1867 gen_op_iwmmxt_set_cup();
1868 break;
1869 case 0x011: /* TMCR */
1870 if (insn & 0xf)
1871 return 1;
1872 rd = (insn >> 12) & 0xf;
1873 wrd = (insn >> 16) & 0xf;
1874 switch (wrd) {
1875 case ARM_IWMMXT_wCID:
1876 case ARM_IWMMXT_wCASF:
1877 break;
1878 case ARM_IWMMXT_wCon:
1879 gen_op_iwmmxt_set_cup();
1880 /* Fall through. */
1881 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1882 tmp = iwmmxt_load_creg(wrd);
1883 tmp2 = load_reg(s, rd);
f669df27 1884 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1885 tcg_temp_free_i32(tmp2);
da6b5335 1886 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1887 break;
1888 case ARM_IWMMXT_wCGR0:
1889 case ARM_IWMMXT_wCGR1:
1890 case ARM_IWMMXT_wCGR2:
1891 case ARM_IWMMXT_wCGR3:
1892 gen_op_iwmmxt_set_cup();
da6b5335
FN
1893 tmp = load_reg(s, rd);
1894 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1895 break;
1896 default:
1897 return 1;
1898 }
1899 break;
1900 case 0x100: /* WXOR */
1901 wrd = (insn >> 12) & 0xf;
1902 rd0 = (insn >> 0) & 0xf;
1903 rd1 = (insn >> 16) & 0xf;
1904 gen_op_iwmmxt_movq_M0_wRn(rd0);
1905 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1906 gen_op_iwmmxt_setpsr_nz();
1907 gen_op_iwmmxt_movq_wRn_M0(wrd);
1908 gen_op_iwmmxt_set_mup();
1909 gen_op_iwmmxt_set_cup();
1910 break;
1911 case 0x111: /* TMRC */
1912 if (insn & 0xf)
1913 return 1;
1914 rd = (insn >> 12) & 0xf;
1915 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1916 tmp = iwmmxt_load_creg(wrd);
1917 store_reg(s, rd, tmp);
18c9b560
AZ
1918 break;
1919 case 0x300: /* WANDN */
1920 wrd = (insn >> 12) & 0xf;
1921 rd0 = (insn >> 0) & 0xf;
1922 rd1 = (insn >> 16) & 0xf;
1923 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1924 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1925 gen_op_iwmmxt_andq_M0_wRn(rd1);
1926 gen_op_iwmmxt_setpsr_nz();
1927 gen_op_iwmmxt_movq_wRn_M0(wrd);
1928 gen_op_iwmmxt_set_mup();
1929 gen_op_iwmmxt_set_cup();
1930 break;
1931 case 0x200: /* WAND */
1932 wrd = (insn >> 12) & 0xf;
1933 rd0 = (insn >> 0) & 0xf;
1934 rd1 = (insn >> 16) & 0xf;
1935 gen_op_iwmmxt_movq_M0_wRn(rd0);
1936 gen_op_iwmmxt_andq_M0_wRn(rd1);
1937 gen_op_iwmmxt_setpsr_nz();
1938 gen_op_iwmmxt_movq_wRn_M0(wrd);
1939 gen_op_iwmmxt_set_mup();
1940 gen_op_iwmmxt_set_cup();
1941 break;
1942 case 0x810: case 0xa10: /* WMADD */
1943 wrd = (insn >> 12) & 0xf;
1944 rd0 = (insn >> 0) & 0xf;
1945 rd1 = (insn >> 16) & 0xf;
1946 gen_op_iwmmxt_movq_M0_wRn(rd0);
1947 if (insn & (1 << 21))
1948 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1949 else
1950 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1951 gen_op_iwmmxt_movq_wRn_M0(wrd);
1952 gen_op_iwmmxt_set_mup();
1953 break;
1954 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1955 wrd = (insn >> 12) & 0xf;
1956 rd0 = (insn >> 16) & 0xf;
1957 rd1 = (insn >> 0) & 0xf;
1958 gen_op_iwmmxt_movq_M0_wRn(rd0);
1959 switch ((insn >> 22) & 3) {
1960 case 0:
1961 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1962 break;
1963 case 1:
1964 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1965 break;
1966 case 2:
1967 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1968 break;
1969 case 3:
1970 return 1;
1971 }
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 gen_op_iwmmxt_set_cup();
1975 break;
1976 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1977 wrd = (insn >> 12) & 0xf;
1978 rd0 = (insn >> 16) & 0xf;
1979 rd1 = (insn >> 0) & 0xf;
1980 gen_op_iwmmxt_movq_M0_wRn(rd0);
1981 switch ((insn >> 22) & 3) {
1982 case 0:
1983 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1984 break;
1985 case 1:
1986 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1987 break;
1988 case 2:
1989 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1990 break;
1991 case 3:
1992 return 1;
1993 }
1994 gen_op_iwmmxt_movq_wRn_M0(wrd);
1995 gen_op_iwmmxt_set_mup();
1996 gen_op_iwmmxt_set_cup();
1997 break;
1998 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1999 wrd = (insn >> 12) & 0xf;
2000 rd0 = (insn >> 16) & 0xf;
2001 rd1 = (insn >> 0) & 0xf;
2002 gen_op_iwmmxt_movq_M0_wRn(rd0);
2003 if (insn & (1 << 22))
2004 gen_op_iwmmxt_sadw_M0_wRn(rd1);
2005 else
2006 gen_op_iwmmxt_sadb_M0_wRn(rd1);
2007 if (!(insn & (1 << 20)))
2008 gen_op_iwmmxt_addl_M0_wRn(wrd);
2009 gen_op_iwmmxt_movq_wRn_M0(wrd);
2010 gen_op_iwmmxt_set_mup();
2011 break;
2012 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
2013 wrd = (insn >> 12) & 0xf;
2014 rd0 = (insn >> 16) & 0xf;
2015 rd1 = (insn >> 0) & 0xf;
2016 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2017 if (insn & (1 << 21)) {
2018 if (insn & (1 << 20))
2019 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
2020 else
2021 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
2022 } else {
2023 if (insn & (1 << 20))
2024 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
2025 else
2026 gen_op_iwmmxt_mululw_M0_wRn(rd1);
2027 }
18c9b560
AZ
2028 gen_op_iwmmxt_movq_wRn_M0(wrd);
2029 gen_op_iwmmxt_set_mup();
2030 break;
2031 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 rd1 = (insn >> 0) & 0xf;
2035 gen_op_iwmmxt_movq_M0_wRn(rd0);
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_macsw_M0_wRn(rd1);
2038 else
2039 gen_op_iwmmxt_macuw_M0_wRn(rd1);
2040 if (!(insn & (1 << 20))) {
e677137d
PB
2041 iwmmxt_load_reg(cpu_V1, wrd);
2042 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
2043 }
2044 gen_op_iwmmxt_movq_wRn_M0(wrd);
2045 gen_op_iwmmxt_set_mup();
2046 break;
2047 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
2048 wrd = (insn >> 12) & 0xf;
2049 rd0 = (insn >> 16) & 0xf;
2050 rd1 = (insn >> 0) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 switch ((insn >> 22) & 3) {
2053 case 0:
2054 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
2055 break;
2056 case 1:
2057 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
2058 break;
2059 case 2:
2060 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
2061 break;
2062 case 3:
2063 return 1;
2064 }
2065 gen_op_iwmmxt_movq_wRn_M0(wrd);
2066 gen_op_iwmmxt_set_mup();
2067 gen_op_iwmmxt_set_cup();
2068 break;
2069 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
2070 wrd = (insn >> 12) & 0xf;
2071 rd0 = (insn >> 16) & 0xf;
2072 rd1 = (insn >> 0) & 0xf;
2073 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2074 if (insn & (1 << 22)) {
2075 if (insn & (1 << 20))
2076 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2077 else
2078 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2079 } else {
2080 if (insn & (1 << 20))
2081 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2082 else
2083 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2084 }
18c9b560
AZ
2085 gen_op_iwmmxt_movq_wRn_M0(wrd);
2086 gen_op_iwmmxt_set_mup();
2087 gen_op_iwmmxt_set_cup();
2088 break;
2089 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2090 wrd = (insn >> 12) & 0xf;
2091 rd0 = (insn >> 16) & 0xf;
2092 rd1 = (insn >> 0) & 0xf;
2093 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2094 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2095 tcg_gen_andi_i32(tmp, tmp, 7);
2096 iwmmxt_load_reg(cpu_V1, rd1);
2097 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2098 tcg_temp_free_i32(tmp);
18c9b560
AZ
2099 gen_op_iwmmxt_movq_wRn_M0(wrd);
2100 gen_op_iwmmxt_set_mup();
2101 break;
2102 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2103 if (((insn >> 6) & 3) == 3)
2104 return 1;
18c9b560
AZ
2105 rd = (insn >> 12) & 0xf;
2106 wrd = (insn >> 16) & 0xf;
da6b5335 2107 tmp = load_reg(s, rd);
18c9b560
AZ
2108 gen_op_iwmmxt_movq_M0_wRn(wrd);
2109 switch ((insn >> 6) & 3) {
2110 case 0:
da6b5335
FN
2111 tmp2 = tcg_const_i32(0xff);
2112 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2113 break;
2114 case 1:
da6b5335
FN
2115 tmp2 = tcg_const_i32(0xffff);
2116 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2117 break;
2118 case 2:
da6b5335
FN
2119 tmp2 = tcg_const_i32(0xffffffff);
2120 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2121 break;
da6b5335 2122 default:
39d5492a
PM
2123 TCGV_UNUSED_I32(tmp2);
2124 TCGV_UNUSED_I32(tmp3);
18c9b560 2125 }
da6b5335 2126 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2127 tcg_temp_free_i32(tmp3);
2128 tcg_temp_free_i32(tmp2);
7d1b0095 2129 tcg_temp_free_i32(tmp);
18c9b560
AZ
2130 gen_op_iwmmxt_movq_wRn_M0(wrd);
2131 gen_op_iwmmxt_set_mup();
2132 break;
2133 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2134 rd = (insn >> 12) & 0xf;
2135 wrd = (insn >> 16) & 0xf;
da6b5335 2136 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2137 return 1;
2138 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2139 tmp = tcg_temp_new_i32();
18c9b560
AZ
2140 switch ((insn >> 22) & 3) {
2141 case 0:
da6b5335 2142 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2143 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2144 if (insn & 8) {
2145 tcg_gen_ext8s_i32(tmp, tmp);
2146 } else {
2147 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2148 }
2149 break;
2150 case 1:
da6b5335 2151 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2152 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2153 if (insn & 8) {
2154 tcg_gen_ext16s_i32(tmp, tmp);
2155 } else {
2156 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2157 }
2158 break;
2159 case 2:
da6b5335 2160 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2161 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2162 break;
18c9b560 2163 }
da6b5335 2164 store_reg(s, rd, tmp);
18c9b560
AZ
2165 break;
2166 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2167 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2168 return 1;
da6b5335 2169 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2170 switch ((insn >> 22) & 3) {
2171 case 0:
da6b5335 2172 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2173 break;
2174 case 1:
da6b5335 2175 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2176 break;
2177 case 2:
da6b5335 2178 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2179 break;
18c9b560 2180 }
da6b5335
FN
2181 tcg_gen_shli_i32(tmp, tmp, 28);
2182 gen_set_nzcv(tmp);
7d1b0095 2183 tcg_temp_free_i32(tmp);
18c9b560
AZ
2184 break;
2185 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2186 if (((insn >> 6) & 3) == 3)
2187 return 1;
18c9b560
AZ
2188 rd = (insn >> 12) & 0xf;
2189 wrd = (insn >> 16) & 0xf;
da6b5335 2190 tmp = load_reg(s, rd);
18c9b560
AZ
2191 switch ((insn >> 6) & 3) {
2192 case 0:
da6b5335 2193 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2194 break;
2195 case 1:
da6b5335 2196 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2197 break;
2198 case 2:
da6b5335 2199 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2200 break;
18c9b560 2201 }
7d1b0095 2202 tcg_temp_free_i32(tmp);
18c9b560
AZ
2203 gen_op_iwmmxt_movq_wRn_M0(wrd);
2204 gen_op_iwmmxt_set_mup();
2205 break;
2206 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2207 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2208 return 1;
da6b5335 2209 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2210 tmp2 = tcg_temp_new_i32();
da6b5335 2211 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2212 switch ((insn >> 22) & 3) {
2213 case 0:
2214 for (i = 0; i < 7; i ++) {
da6b5335
FN
2215 tcg_gen_shli_i32(tmp2, tmp2, 4);
2216 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2217 }
2218 break;
2219 case 1:
2220 for (i = 0; i < 3; i ++) {
da6b5335
FN
2221 tcg_gen_shli_i32(tmp2, tmp2, 8);
2222 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2223 }
2224 break;
2225 case 2:
da6b5335
FN
2226 tcg_gen_shli_i32(tmp2, tmp2, 16);
2227 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2228 break;
18c9b560 2229 }
da6b5335 2230 gen_set_nzcv(tmp);
7d1b0095
PM
2231 tcg_temp_free_i32(tmp2);
2232 tcg_temp_free_i32(tmp);
18c9b560
AZ
2233 break;
2234 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2235 wrd = (insn >> 12) & 0xf;
2236 rd0 = (insn >> 16) & 0xf;
2237 gen_op_iwmmxt_movq_M0_wRn(rd0);
2238 switch ((insn >> 22) & 3) {
2239 case 0:
e677137d 2240 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2241 break;
2242 case 1:
e677137d 2243 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2244 break;
2245 case 2:
e677137d 2246 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2247 break;
2248 case 3:
2249 return 1;
2250 }
2251 gen_op_iwmmxt_movq_wRn_M0(wrd);
2252 gen_op_iwmmxt_set_mup();
2253 break;
2254 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2255 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2256 return 1;
da6b5335 2257 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2258 tmp2 = tcg_temp_new_i32();
da6b5335 2259 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2260 switch ((insn >> 22) & 3) {
2261 case 0:
2262 for (i = 0; i < 7; i ++) {
da6b5335
FN
2263 tcg_gen_shli_i32(tmp2, tmp2, 4);
2264 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2265 }
2266 break;
2267 case 1:
2268 for (i = 0; i < 3; i ++) {
da6b5335
FN
2269 tcg_gen_shli_i32(tmp2, tmp2, 8);
2270 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2271 }
2272 break;
2273 case 2:
da6b5335
FN
2274 tcg_gen_shli_i32(tmp2, tmp2, 16);
2275 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2276 break;
18c9b560 2277 }
da6b5335 2278 gen_set_nzcv(tmp);
7d1b0095
PM
2279 tcg_temp_free_i32(tmp2);
2280 tcg_temp_free_i32(tmp);
18c9b560
AZ
2281 break;
2282 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2283 rd = (insn >> 12) & 0xf;
2284 rd0 = (insn >> 16) & 0xf;
da6b5335 2285 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2286 return 1;
2287 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2288 tmp = tcg_temp_new_i32();
18c9b560
AZ
2289 switch ((insn >> 22) & 3) {
2290 case 0:
da6b5335 2291 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2292 break;
2293 case 1:
da6b5335 2294 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2295 break;
2296 case 2:
da6b5335 2297 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2298 break;
18c9b560 2299 }
da6b5335 2300 store_reg(s, rd, tmp);
18c9b560
AZ
2301 break;
2302 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2303 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2304 wrd = (insn >> 12) & 0xf;
2305 rd0 = (insn >> 16) & 0xf;
2306 rd1 = (insn >> 0) & 0xf;
2307 gen_op_iwmmxt_movq_M0_wRn(rd0);
2308 switch ((insn >> 22) & 3) {
2309 case 0:
2310 if (insn & (1 << 21))
2311 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2312 else
2313 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2314 break;
2315 case 1:
2316 if (insn & (1 << 21))
2317 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2318 else
2319 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2320 break;
2321 case 2:
2322 if (insn & (1 << 21))
2323 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2324 else
2325 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2326 break;
2327 case 3:
2328 return 1;
2329 }
2330 gen_op_iwmmxt_movq_wRn_M0(wrd);
2331 gen_op_iwmmxt_set_mup();
2332 gen_op_iwmmxt_set_cup();
2333 break;
2334 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2335 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2336 wrd = (insn >> 12) & 0xf;
2337 rd0 = (insn >> 16) & 0xf;
2338 gen_op_iwmmxt_movq_M0_wRn(rd0);
2339 switch ((insn >> 22) & 3) {
2340 case 0:
2341 if (insn & (1 << 21))
2342 gen_op_iwmmxt_unpacklsb_M0();
2343 else
2344 gen_op_iwmmxt_unpacklub_M0();
2345 break;
2346 case 1:
2347 if (insn & (1 << 21))
2348 gen_op_iwmmxt_unpacklsw_M0();
2349 else
2350 gen_op_iwmmxt_unpackluw_M0();
2351 break;
2352 case 2:
2353 if (insn & (1 << 21))
2354 gen_op_iwmmxt_unpacklsl_M0();
2355 else
2356 gen_op_iwmmxt_unpacklul_M0();
2357 break;
2358 case 3:
2359 return 1;
2360 }
2361 gen_op_iwmmxt_movq_wRn_M0(wrd);
2362 gen_op_iwmmxt_set_mup();
2363 gen_op_iwmmxt_set_cup();
2364 break;
2365 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2366 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2367 wrd = (insn >> 12) & 0xf;
2368 rd0 = (insn >> 16) & 0xf;
2369 gen_op_iwmmxt_movq_M0_wRn(rd0);
2370 switch ((insn >> 22) & 3) {
2371 case 0:
2372 if (insn & (1 << 21))
2373 gen_op_iwmmxt_unpackhsb_M0();
2374 else
2375 gen_op_iwmmxt_unpackhub_M0();
2376 break;
2377 case 1:
2378 if (insn & (1 << 21))
2379 gen_op_iwmmxt_unpackhsw_M0();
2380 else
2381 gen_op_iwmmxt_unpackhuw_M0();
2382 break;
2383 case 2:
2384 if (insn & (1 << 21))
2385 gen_op_iwmmxt_unpackhsl_M0();
2386 else
2387 gen_op_iwmmxt_unpackhul_M0();
2388 break;
2389 case 3:
2390 return 1;
2391 }
2392 gen_op_iwmmxt_movq_wRn_M0(wrd);
2393 gen_op_iwmmxt_set_mup();
2394 gen_op_iwmmxt_set_cup();
2395 break;
2396 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2397 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2398 if (((insn >> 22) & 3) == 0)
2399 return 1;
18c9b560
AZ
2400 wrd = (insn >> 12) & 0xf;
2401 rd0 = (insn >> 16) & 0xf;
2402 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2403 tmp = tcg_temp_new_i32();
da6b5335 2404 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2405 tcg_temp_free_i32(tmp);
18c9b560 2406 return 1;
da6b5335 2407 }
18c9b560 2408 switch ((insn >> 22) & 3) {
18c9b560 2409 case 1:
477955bd 2410 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2411 break;
2412 case 2:
477955bd 2413 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2414 break;
2415 case 3:
477955bd 2416 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2417 break;
2418 }
7d1b0095 2419 tcg_temp_free_i32(tmp);
18c9b560
AZ
2420 gen_op_iwmmxt_movq_wRn_M0(wrd);
2421 gen_op_iwmmxt_set_mup();
2422 gen_op_iwmmxt_set_cup();
2423 break;
2424 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2425 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2426 if (((insn >> 22) & 3) == 0)
2427 return 1;
18c9b560
AZ
2428 wrd = (insn >> 12) & 0xf;
2429 rd0 = (insn >> 16) & 0xf;
2430 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2431 tmp = tcg_temp_new_i32();
da6b5335 2432 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2433 tcg_temp_free_i32(tmp);
18c9b560 2434 return 1;
da6b5335 2435 }
18c9b560 2436 switch ((insn >> 22) & 3) {
18c9b560 2437 case 1:
477955bd 2438 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2439 break;
2440 case 2:
477955bd 2441 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2442 break;
2443 case 3:
477955bd 2444 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2445 break;
2446 }
7d1b0095 2447 tcg_temp_free_i32(tmp);
18c9b560
AZ
2448 gen_op_iwmmxt_movq_wRn_M0(wrd);
2449 gen_op_iwmmxt_set_mup();
2450 gen_op_iwmmxt_set_cup();
2451 break;
2452 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2453 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2454 if (((insn >> 22) & 3) == 0)
2455 return 1;
18c9b560
AZ
2456 wrd = (insn >> 12) & 0xf;
2457 rd0 = (insn >> 16) & 0xf;
2458 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2459 tmp = tcg_temp_new_i32();
da6b5335 2460 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2461 tcg_temp_free_i32(tmp);
18c9b560 2462 return 1;
da6b5335 2463 }
18c9b560 2464 switch ((insn >> 22) & 3) {
18c9b560 2465 case 1:
477955bd 2466 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2467 break;
2468 case 2:
477955bd 2469 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2470 break;
2471 case 3:
477955bd 2472 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2473 break;
2474 }
7d1b0095 2475 tcg_temp_free_i32(tmp);
18c9b560
AZ
2476 gen_op_iwmmxt_movq_wRn_M0(wrd);
2477 gen_op_iwmmxt_set_mup();
2478 gen_op_iwmmxt_set_cup();
2479 break;
2480 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2481 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2482 if (((insn >> 22) & 3) == 0)
2483 return 1;
18c9b560
AZ
2484 wrd = (insn >> 12) & 0xf;
2485 rd0 = (insn >> 16) & 0xf;
2486 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2487 tmp = tcg_temp_new_i32();
18c9b560 2488 switch ((insn >> 22) & 3) {
18c9b560 2489 case 1:
da6b5335 2490 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2491 tcg_temp_free_i32(tmp);
18c9b560 2492 return 1;
da6b5335 2493 }
477955bd 2494 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2495 break;
2496 case 2:
da6b5335 2497 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2498 tcg_temp_free_i32(tmp);
18c9b560 2499 return 1;
da6b5335 2500 }
477955bd 2501 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2502 break;
2503 case 3:
da6b5335 2504 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2505 tcg_temp_free_i32(tmp);
18c9b560 2506 return 1;
da6b5335 2507 }
477955bd 2508 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2509 break;
2510 }
7d1b0095 2511 tcg_temp_free_i32(tmp);
18c9b560
AZ
2512 gen_op_iwmmxt_movq_wRn_M0(wrd);
2513 gen_op_iwmmxt_set_mup();
2514 gen_op_iwmmxt_set_cup();
2515 break;
2516 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2517 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2518 wrd = (insn >> 12) & 0xf;
2519 rd0 = (insn >> 16) & 0xf;
2520 rd1 = (insn >> 0) & 0xf;
2521 gen_op_iwmmxt_movq_M0_wRn(rd0);
2522 switch ((insn >> 22) & 3) {
2523 case 0:
2524 if (insn & (1 << 21))
2525 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2526 else
2527 gen_op_iwmmxt_minub_M0_wRn(rd1);
2528 break;
2529 case 1:
2530 if (insn & (1 << 21))
2531 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2532 else
2533 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2534 break;
2535 case 2:
2536 if (insn & (1 << 21))
2537 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2538 else
2539 gen_op_iwmmxt_minul_M0_wRn(rd1);
2540 break;
2541 case 3:
2542 return 1;
2543 }
2544 gen_op_iwmmxt_movq_wRn_M0(wrd);
2545 gen_op_iwmmxt_set_mup();
2546 break;
2547 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2548 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2549 wrd = (insn >> 12) & 0xf;
2550 rd0 = (insn >> 16) & 0xf;
2551 rd1 = (insn >> 0) & 0xf;
2552 gen_op_iwmmxt_movq_M0_wRn(rd0);
2553 switch ((insn >> 22) & 3) {
2554 case 0:
2555 if (insn & (1 << 21))
2556 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2557 else
2558 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2559 break;
2560 case 1:
2561 if (insn & (1 << 21))
2562 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2563 else
2564 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2565 break;
2566 case 2:
2567 if (insn & (1 << 21))
2568 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2569 else
2570 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2571 break;
2572 case 3:
2573 return 1;
2574 }
2575 gen_op_iwmmxt_movq_wRn_M0(wrd);
2576 gen_op_iwmmxt_set_mup();
2577 break;
2578 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2579 case 0x402: case 0x502: case 0x602: case 0x702:
2580 wrd = (insn >> 12) & 0xf;
2581 rd0 = (insn >> 16) & 0xf;
2582 rd1 = (insn >> 0) & 0xf;
2583 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2584 tmp = tcg_const_i32((insn >> 20) & 3);
2585 iwmmxt_load_reg(cpu_V1, rd1);
2586 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2587 tcg_temp_free_i32(tmp);
18c9b560
AZ
2588 gen_op_iwmmxt_movq_wRn_M0(wrd);
2589 gen_op_iwmmxt_set_mup();
2590 break;
2591 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2592 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2593 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2594 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2595 wrd = (insn >> 12) & 0xf;
2596 rd0 = (insn >> 16) & 0xf;
2597 rd1 = (insn >> 0) & 0xf;
2598 gen_op_iwmmxt_movq_M0_wRn(rd0);
2599 switch ((insn >> 20) & 0xf) {
2600 case 0x0:
2601 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2602 break;
2603 case 0x1:
2604 gen_op_iwmmxt_subub_M0_wRn(rd1);
2605 break;
2606 case 0x3:
2607 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2608 break;
2609 case 0x4:
2610 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2611 break;
2612 case 0x5:
2613 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2614 break;
2615 case 0x7:
2616 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2617 break;
2618 case 0x8:
2619 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2620 break;
2621 case 0x9:
2622 gen_op_iwmmxt_subul_M0_wRn(rd1);
2623 break;
2624 case 0xb:
2625 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2626 break;
2627 default:
2628 return 1;
2629 }
2630 gen_op_iwmmxt_movq_wRn_M0(wrd);
2631 gen_op_iwmmxt_set_mup();
2632 gen_op_iwmmxt_set_cup();
2633 break;
2634 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2635 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2636 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2637 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2638 wrd = (insn >> 12) & 0xf;
2639 rd0 = (insn >> 16) & 0xf;
2640 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2641 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2642 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2643 tcg_temp_free_i32(tmp);
18c9b560
AZ
2644 gen_op_iwmmxt_movq_wRn_M0(wrd);
2645 gen_op_iwmmxt_set_mup();
2646 gen_op_iwmmxt_set_cup();
2647 break;
2648 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2649 case 0x418: case 0x518: case 0x618: case 0x718:
2650 case 0x818: case 0x918: case 0xa18: case 0xb18:
2651 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2652 wrd = (insn >> 12) & 0xf;
2653 rd0 = (insn >> 16) & 0xf;
2654 rd1 = (insn >> 0) & 0xf;
2655 gen_op_iwmmxt_movq_M0_wRn(rd0);
2656 switch ((insn >> 20) & 0xf) {
2657 case 0x0:
2658 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2659 break;
2660 case 0x1:
2661 gen_op_iwmmxt_addub_M0_wRn(rd1);
2662 break;
2663 case 0x3:
2664 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2665 break;
2666 case 0x4:
2667 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2668 break;
2669 case 0x5:
2670 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2671 break;
2672 case 0x7:
2673 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2674 break;
2675 case 0x8:
2676 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2677 break;
2678 case 0x9:
2679 gen_op_iwmmxt_addul_M0_wRn(rd1);
2680 break;
2681 case 0xb:
2682 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2683 break;
2684 default:
2685 return 1;
2686 }
2687 gen_op_iwmmxt_movq_wRn_M0(wrd);
2688 gen_op_iwmmxt_set_mup();
2689 gen_op_iwmmxt_set_cup();
2690 break;
2691 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2692 case 0x408: case 0x508: case 0x608: case 0x708:
2693 case 0x808: case 0x908: case 0xa08: case 0xb08:
2694 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2695 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2696 return 1;
18c9b560
AZ
2697 wrd = (insn >> 12) & 0xf;
2698 rd0 = (insn >> 16) & 0xf;
2699 rd1 = (insn >> 0) & 0xf;
2700 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2701 switch ((insn >> 22) & 3) {
18c9b560
AZ
2702 case 1:
2703 if (insn & (1 << 21))
2704 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2705 else
2706 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2707 break;
2708 case 2:
2709 if (insn & (1 << 21))
2710 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2711 else
2712 gen_op_iwmmxt_packul_M0_wRn(rd1);
2713 break;
2714 case 3:
2715 if (insn & (1 << 21))
2716 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2717 else
2718 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2719 break;
2720 }
2721 gen_op_iwmmxt_movq_wRn_M0(wrd);
2722 gen_op_iwmmxt_set_mup();
2723 gen_op_iwmmxt_set_cup();
2724 break;
2725 case 0x201: case 0x203: case 0x205: case 0x207:
2726 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2727 case 0x211: case 0x213: case 0x215: case 0x217:
2728 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2729 wrd = (insn >> 5) & 0xf;
2730 rd0 = (insn >> 12) & 0xf;
2731 rd1 = (insn >> 0) & 0xf;
2732 if (rd0 == 0xf || rd1 == 0xf)
2733 return 1;
2734 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2735 tmp = load_reg(s, rd0);
2736 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2737 switch ((insn >> 16) & 0xf) {
2738 case 0x0: /* TMIA */
da6b5335 2739 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2740 break;
2741 case 0x8: /* TMIAPH */
da6b5335 2742 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2743 break;
2744 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2745 if (insn & (1 << 16))
da6b5335 2746 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2747 if (insn & (1 << 17))
da6b5335
FN
2748 tcg_gen_shri_i32(tmp2, tmp2, 16);
2749 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2750 break;
2751 default:
7d1b0095
PM
2752 tcg_temp_free_i32(tmp2);
2753 tcg_temp_free_i32(tmp);
18c9b560
AZ
2754 return 1;
2755 }
7d1b0095
PM
2756 tcg_temp_free_i32(tmp2);
2757 tcg_temp_free_i32(tmp);
18c9b560
AZ
2758 gen_op_iwmmxt_movq_wRn_M0(wrd);
2759 gen_op_iwmmxt_set_mup();
2760 break;
2761 default:
2762 return 1;
2763 }
2764
2765 return 0;
2766}
2767
a1c7273b 2768/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2769 (ie. an undefined instruction). */
7dcc1f89 2770static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2771{
2772 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2773 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2774
2775 if ((insn & 0x0ff00f10) == 0x0e200010) {
2776 /* Multiply with Internal Accumulate Format */
2777 rd0 = (insn >> 12) & 0xf;
2778 rd1 = insn & 0xf;
2779 acc = (insn >> 5) & 7;
2780
2781 if (acc != 0)
2782 return 1;
2783
3a554c0f
FN
2784 tmp = load_reg(s, rd0);
2785 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2786 switch ((insn >> 16) & 0xf) {
2787 case 0x0: /* MIA */
3a554c0f 2788 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2789 break;
2790 case 0x8: /* MIAPH */
3a554c0f 2791 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2792 break;
2793 case 0xc: /* MIABB */
2794 case 0xd: /* MIABT */
2795 case 0xe: /* MIATB */
2796 case 0xf: /* MIATT */
18c9b560 2797 if (insn & (1 << 16))
3a554c0f 2798 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2799 if (insn & (1 << 17))
3a554c0f
FN
2800 tcg_gen_shri_i32(tmp2, tmp2, 16);
2801 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2802 break;
2803 default:
2804 return 1;
2805 }
7d1b0095
PM
2806 tcg_temp_free_i32(tmp2);
2807 tcg_temp_free_i32(tmp);
18c9b560
AZ
2808
2809 gen_op_iwmmxt_movq_wRn_M0(acc);
2810 return 0;
2811 }
2812
2813 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2814 /* Internal Accumulator Access Format */
2815 rdhi = (insn >> 16) & 0xf;
2816 rdlo = (insn >> 12) & 0xf;
2817 acc = insn & 7;
2818
2819 if (acc != 0)
2820 return 1;
2821
2822 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2823 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2824 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2825 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2826 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2827 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2828 } else { /* MAR */
3a554c0f
FN
2829 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2830 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2831 }
2832 return 0;
2833 }
2834
2835 return 1;
2836}
2837
/* Helpers for extracting VFP register numbers from an instruction.
 * VFP_REG_SHR shifts right for positive n, left for negative n (used so
 * the same macro works for fields on either side of the target position).
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision register number: 4 bits from "bigbit" plus the low
 * bit taken from "smallbit".
 */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number: with VFP3 the "smallbit" supplies
 * bit 4 (registers D16..D31); without VFP3 that bit must be zero or the
 * instruction is UNDEF (the macro makes the caller return 1).
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Standard field positions for the Vd, Vn and Vm register fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2857
4373f3ce 2858/* Move between integer and VFP cores. */
39d5492a 2859static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2860{
39d5492a 2861 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2862 tcg_gen_mov_i32(tmp, cpu_F0s);
2863 return tmp;
2864}
2865
39d5492a 2866static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2867{
2868 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2869 tcg_temp_free_i32(tmp);
4373f3ce
PB
2870}
2871
39d5492a 2872static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2873{
39d5492a 2874 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2875 if (shift)
2876 tcg_gen_shri_i32(var, var, shift);
86831435 2877 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2878 tcg_gen_shli_i32(tmp, var, 8);
2879 tcg_gen_or_i32(var, var, tmp);
2880 tcg_gen_shli_i32(tmp, var, 16);
2881 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2882 tcg_temp_free_i32(tmp);
ad69471c
PB
2883}
2884
39d5492a 2885static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2886{
39d5492a 2887 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2888 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2889 tcg_gen_shli_i32(tmp, var, 16);
2890 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2891 tcg_temp_free_i32(tmp);
ad69471c
PB
2892}
2893
39d5492a 2894static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2895{
39d5492a 2896 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2897 tcg_gen_andi_i32(var, var, 0xffff0000);
2898 tcg_gen_shri_i32(tmp, var, 16);
2899 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2900 tcg_temp_free_i32(tmp);
ad69471c
PB
2901}
2902
39d5492a 2903static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2904{
2905 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2906 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2907 switch (size) {
2908 case 0:
12dcc321 2909 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2910 gen_neon_dup_u8(tmp, 0);
2911 break;
2912 case 1:
12dcc321 2913 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2914 gen_neon_dup_low16(tmp);
2915 break;
2916 case 2:
12dcc321 2917 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2918 break;
2919 default: /* Avoid compiler warnings. */
2920 abort();
2921 }
2922 return tmp;
2923}
2924
/* Handle the VSEL instruction: conditionally select between the rn and rm
 * VFP registers based on the ARM condition flags, writing the result to rd.
 * "dp" selects double precision (nonzero) or single precision.  The two-bit
 * cc field encodes EQ/VS/GE/GT; the other conditions are obtained by the
 * caller swapping rn/rm.  Always returns 0 (the decode cannot fail here).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the 32-bit flag values to 64 bits so they can be used with
         * 64-bit movcond.  NF and VF are sign-significant, hence the
         * signed extension; ZF only compares against zero.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two chained movconds: first select on !Z, then override with
             * frm again if N != V.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: the 32-bit flag registers can be used with
         * movcond directly, no widening needed.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
3033
40cfacdd
WN
3034static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
3035 uint32_t rm, uint32_t dp)
3036{
3037 uint32_t vmin = extract32(insn, 6, 1);
3038 TCGv_ptr fpst = get_fpstatus_ptr(0);
3039
3040 if (dp) {
3041 TCGv_i64 frn, frm, dest;
3042
3043 frn = tcg_temp_new_i64();
3044 frm = tcg_temp_new_i64();
3045 dest = tcg_temp_new_i64();
3046
3047 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
3048 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
3049 if (vmin) {
f71a2ae5 3050 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 3051 } else {
f71a2ae5 3052 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
3053 }
3054 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
3055 tcg_temp_free_i64(frn);
3056 tcg_temp_free_i64(frm);
3057 tcg_temp_free_i64(dest);
3058 } else {
3059 TCGv_i32 frn, frm, dest;
3060
3061 frn = tcg_temp_new_i32();
3062 frm = tcg_temp_new_i32();
3063 dest = tcg_temp_new_i32();
3064
3065 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
3066 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
3067 if (vmin) {
f71a2ae5 3068 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 3069 } else {
f71a2ae5 3070 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
3071 }
3072 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3073 tcg_temp_free_i32(frn);
3074 tcg_temp_free_i32(frm);
3075 tcg_temp_free_i32(dest);
3076 }
3077
3078 tcg_temp_free_ptr(fpst);
3079 return 0;
3080}
3081
7655f39b
WN
3082static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3083 int rounding)
3084{
3085 TCGv_ptr fpst = get_fpstatus_ptr(0);
3086 TCGv_i32 tcg_rmode;
3087
3088 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3089 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3090
3091 if (dp) {
3092 TCGv_i64 tcg_op;
3093 TCGv_i64 tcg_res;
3094 tcg_op = tcg_temp_new_i64();
3095 tcg_res = tcg_temp_new_i64();
3096 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3097 gen_helper_rintd(tcg_res, tcg_op, fpst);
3098 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3099 tcg_temp_free_i64(tcg_op);
3100 tcg_temp_free_i64(tcg_res);
3101 } else {
3102 TCGv_i32 tcg_op;
3103 TCGv_i32 tcg_res;
3104 tcg_op = tcg_temp_new_i32();
3105 tcg_res = tcg_temp_new_i32();
3106 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3107 gen_helper_rints(tcg_res, tcg_op, fpst);
3108 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3109 tcg_temp_free_i32(tcg_op);
3110 tcg_temp_free_i32(tcg_res);
3111 }
3112
3113 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3114 tcg_temp_free_i32(tcg_rmode);
3115
3116 tcg_temp_free_ptr(fpst);
3117 return 0;
3118}
3119
c9975a83
WN
3120static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3121 int rounding)
3122{
3123 bool is_signed = extract32(insn, 7, 1);
3124 TCGv_ptr fpst = get_fpstatus_ptr(0);
3125 TCGv_i32 tcg_rmode, tcg_shift;
3126
3127 tcg_shift = tcg_const_i32(0);
3128
3129 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3130 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3131
3132 if (dp) {
3133 TCGv_i64 tcg_double, tcg_res;
3134 TCGv_i32 tcg_tmp;
3135 /* Rd is encoded as a single precision register even when the source
3136 * is double precision.
3137 */
3138 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3139 tcg_double = tcg_temp_new_i64();
3140 tcg_res = tcg_temp_new_i64();
3141 tcg_tmp = tcg_temp_new_i32();
3142 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3143 if (is_signed) {
3144 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3145 } else {
3146 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3147 }
ecc7b3aa 3148 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3149 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3150 tcg_temp_free_i32(tcg_tmp);
3151 tcg_temp_free_i64(tcg_res);
3152 tcg_temp_free_i64(tcg_double);
3153 } else {
3154 TCGv_i32 tcg_single, tcg_res;
3155 tcg_single = tcg_temp_new_i32();
3156 tcg_res = tcg_temp_new_i32();
3157 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3158 if (is_signed) {
3159 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3160 } else {
3161 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3162 }
3163 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3164 tcg_temp_free_i32(tcg_res);
3165 tcg_temp_free_i32(tcg_single);
3166 }
3167
3168 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3169 tcg_temp_free_i32(tcg_rmode);
3170
3171 tcg_temp_free_i32(tcg_shift);
3172
3173 tcg_temp_free_ptr(fpst);
3174
3175 return 0;
3176}
7655f39b
WN
3177
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Indexed by the 2-bit RM field of the instruction.
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,  /* RM = 0b00: round to nearest, ties away (A) */
    FPROUNDING_TIEEVEN,  /* RM = 0b01: round to nearest, ties even (N) */
    FPROUNDING_POSINF,   /* RM = 0b10: round towards +infinity (P) */
    FPROUNDING_NEGINF,   /* RM = 0b11: round towards -infinity (M) */
};
3188
7dcc1f89 3189static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3190{
3191 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3192
d614a513 3193 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3194 return 1;
3195 }
3196
3197 if (dp) {
3198 VFP_DREG_D(rd, insn);
3199 VFP_DREG_N(rn, insn);
3200 VFP_DREG_M(rm, insn);
3201 } else {
3202 rd = VFP_SREG_D(insn);
3203 rn = VFP_SREG_N(insn);
3204 rm = VFP_SREG_M(insn);
3205 }
3206
3207 if ((insn & 0x0f800e50) == 0x0e000a00) {
3208 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3209 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3210 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3211 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3212 /* VRINTA, VRINTN, VRINTP, VRINTM */
3213 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3214 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3215 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3216 /* VCVTA, VCVTN, VCVTP, VCVTM */
3217 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3218 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3219 }
3220 return 1;
3221}
3222
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   Covers the whole legacy VFP space: single/two-register transfers
   between ARM core and VFP registers, system-register moves (FMXR/FMRX),
   data-processing (including the op==15 "extension space" of conversions,
   compares and rounds, and short-vector iteration), and load/store
   (single and multiple). */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* dp distinguishes double-precision (coproc 11) from single (10).  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                /* D-reg <-> core-reg element moves (VMOV scalar / VDUP);
                 * lane-size variants require Neon.
                 */
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                /* pass selects which 32-bit half of the D register;
                 * size/offset locate the 8/16/32-bit lane within it.
                 */
                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm: extract the lane, then zero- or
                     * sign-extend it (bit 23 = unsigned).
                     */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP: replicate the core register value into
                         * every lane of the destination register(s).
                         */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV: insert the lane into the existing word
                         * with a deposit, leaving other lanes intact.
                         */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                /* S-reg <-> core-reg moves and system register access.  */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* rd==15 is FMSTAT: only the NZCV bits.  */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            /* FPSCR changes can affect codegen state.  */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector mode: LEN/STRIDE from FPSCR make one insn
             * iterate over several registers.  Disabled for op==15
             * opcodes above 3 (compares/converts are always scalar).
             */
            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Main op loop: executes once for scalars, repeatedly for
             * short-vector operations (veclen counts extra iterations).
             */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    /* VMOV immediate: expand the 8-bit modified-immediate
                     * into a full single/double constant.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        /* Result goes in the low half of Sd; the high
                         * half of the existing value is preserved.
                         */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        /* As case 6 but targeting the high half of Sd.  */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        /* Round towards zero regardless of FPSCR mode:
                         * temporarily swap the rounding mode around the op.
                         */
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    /* A D register is moved as its two 32-bit halves.  */
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                /* Register size in bytes for the address increment.  */
                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        /* FLDMX/FSTMX: base advances one extra word.  */
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
4142
90aa39a1 4143static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4144{
90aa39a1
SF
4145#ifndef CONFIG_USER_ONLY
4146 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4147 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4148#else
4149 return true;
4150#endif
4151}
6e256c93 4152
8a6b28c7
EC
4153static void gen_goto_ptr(void)
4154{
4155 TCGv addr = tcg_temp_new();
4156 tcg_gen_extu_i32_tl(addr, cpu_R[15]);
4157 tcg_gen_lookup_and_goto_ptr(addr);
4158 tcg_temp_free(addr);
4159}
4160
4161static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
90aa39a1
SF
4162{
4163 if (use_goto_tb(s, dest)) {
57fec1fe 4164 tcg_gen_goto_tb(n);
eaed129d 4165 gen_set_pc_im(s, dest);
90aa39a1 4166 tcg_gen_exit_tb((uintptr_t)s->tb + n);
6e256c93 4167 } else {
eaed129d 4168 gen_set_pc_im(s, dest);
8a6b28c7 4169 gen_goto_ptr();
6e256c93 4170 }
c53be334
FB
4171}
4172
8aaca4c0
FB
4173static inline void gen_jmp (DisasContext *s, uint32_t dest)
4174{
b636649f 4175 if (unlikely(is_singlestepping(s))) {
8aaca4c0 4176 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4177 if (s->thumb)
d9ba4830
PB
4178 dest |= 1;
4179 gen_bx_im(s, dest);
8aaca4c0 4180 } else {
6e256c93 4181 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4182 s->is_jmp = DISAS_TB_JUMP;
4183 }
4184}
4185
39d5492a 4186static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4187{
ee097184 4188 if (x)
d9ba4830 4189 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4190 else
d9ba4830 4191 gen_sxth(t0);
ee097184 4192 if (y)
d9ba4830 4193 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4194 else
d9ba4830
PB
4195 gen_sxth(t1);
4196 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4197}
4198
4199/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4200static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4201{
b5ff1b31
FB
4202 uint32_t mask;
4203
4204 mask = 0;
4205 if (flags & (1 << 0))
4206 mask |= 0xff;
4207 if (flags & (1 << 1))
4208 mask |= 0xff00;
4209 if (flags & (1 << 2))
4210 mask |= 0xff0000;
4211 if (flags & (1 << 3))
4212 mask |= 0xff000000;
9ee6e8bb 4213
2ae23e75 4214 /* Mask out undefined bits. */
9ee6e8bb 4215 mask &= ~CPSR_RESERVED;
d614a513 4216 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4217 mask &= ~CPSR_T;
d614a513
PM
4218 }
4219 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4220 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4221 }
4222 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4223 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4224 }
4225 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4226 mask &= ~CPSR_IT;
d614a513 4227 }
4051e12c
PM
4228 /* Mask out execution state and reserved bits. */
4229 if (!spsr) {
4230 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4231 }
b5ff1b31
FB
4232 /* Mask out privileged bits. */
4233 if (IS_USER(s))
9ee6e8bb 4234 mask &= CPSR_USER;
b5ff1b31
FB
4235 return mask;
4236}
4237
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead.
 * 'mask' selects which PSR bits are written; 'spsr' non-zero targets the
 * SPSR of the current mode, zero targets the CPSR.
 */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Merge: keep the SPSR bits outside 'mask', take the bits of
         * t0 inside it, and write the result back.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* End the TB with a lookup, since a PSR write may change state the
     * translation depends on.
     */
    gen_lookup_tb(s);
    return 0;
}
4259
2fbac54b
FN
4260/* Returns nonzero if access to the PSR is not permitted. */
4261static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4262{
39d5492a 4263 TCGv_i32 tmp;
7d1b0095 4264 tmp = tcg_temp_new_i32();
2fbac54b
FN
4265 tcg_gen_movi_i32(tmp, val);
4266 return gen_set_psr(s, mask, spsr, tmp);
4267}
4268
static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe: /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10: /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12: /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14: /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16: /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c: /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e: /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default: /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default: /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /* Note that we can forbid accesses from EL2 here because they
         * must be from Hyp mode itself
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
    return false;
}
4413
4414static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4415{
4416 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4417 int tgtmode = 0, regno = 0;
4418
4419 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4420 return;
4421 }
4422
4423 /* Sync state because msr_banked() can raise exceptions */
4424 gen_set_condexec(s);
4425 gen_set_pc_im(s, s->pc - 4);
4426 tcg_reg = load_reg(s, rn);
4427 tcg_tgtmode = tcg_const_i32(tgtmode);
4428 tcg_regno = tcg_const_i32(regno);
4429 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4430 tcg_temp_free_i32(tcg_tgtmode);
4431 tcg_temp_free_i32(tcg_regno);
4432 tcg_temp_free_i32(tcg_reg);
4433 s->is_jmp = DISAS_UPDATE;
4434}
4435
4436static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4437{
4438 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4439 int tgtmode = 0, regno = 0;
4440
4441 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4442 return;
4443 }
4444
4445 /* Sync state because mrs_banked() can raise exceptions */
4446 gen_set_condexec(s);
4447 gen_set_pc_im(s, s->pc - 4);
4448 tcg_reg = tcg_temp_new_i32();
4449 tcg_tgtmode = tcg_const_i32(tgtmode);
4450 tcg_regno = tcg_const_i32(regno);
4451 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4452 tcg_temp_free_i32(tcg_tgtmode);
4453 tcg_temp_free_i32(tcg_regno);
4454 store_reg(s, rn, tcg_reg);
4455 s->is_jmp = DISAS_UPDATE;
4456}
4457
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 * Consumes (frees) the 'pc' temporary.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}
4467
/* Generate a v6 exception return.  Marks both values as dead.
 * Writes 'pc' to R15 and 'cpsr' to the CPSR via the eret helper,
 * then ends the TB.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4480
/* Generate an old-style exception return. Marks pc as dead.
 * Restores the CPSR from the current mode's SPSR.
 */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4486
c22edfeb
AB
4487/*
4488 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4489 * only call the helper when running single threaded TCG code to ensure
4490 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4491 * just skip this instruction. Currently the SEV/SEVL instructions
4492 * which are *one* of many ways to wake the CPU from WFE are not
4493 * implemented so we can't sleep like WFI does.
4494 */
9ee6e8bb
PB
4495static void gen_nop_hint(DisasContext *s, int val)
4496{
4497 switch (val) {
c87e5a61 4498 case 1: /* yield */
c22edfeb
AB
4499 if (!parallel_cpus) {
4500 gen_set_pc_im(s, s->pc);
4501 s->is_jmp = DISAS_YIELD;
4502 }
c87e5a61 4503 break;
9ee6e8bb 4504 case 3: /* wfi */
eaed129d 4505 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
4506 s->is_jmp = DISAS_WFI;
4507 break;
4508 case 2: /* wfe */
c22edfeb
AB
4509 if (!parallel_cpus) {
4510 gen_set_pc_im(s, s->pc);
4511 s->is_jmp = DISAS_WFE;
4512 }
72c1d3af 4513 break;
9ee6e8bb 4514 case 4: /* sev */
12b10571
MR
4515 case 5: /* sevl */
4516 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4517 default: /* nop */
4518 break;
4519 }
4520}
99c475ab 4521
ad69471c 4522#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4523
39d5492a 4524static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4525{
4526 switch (size) {
dd8fbd78
FN
4527 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4528 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4529 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4530 default: abort();
9ee6e8bb 4531 }
9ee6e8bb
PB
4532}
4533
39d5492a 4534static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4535{
4536 switch (size) {
dd8fbd78
FN
4537 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4538 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4539 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4540 default: return;
4541 }
4542}
4543
/* 32-bit pairwise ops end up the same as the elementwise versions,
 * so the pairwise min/max helpers are simple aliases.
 */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
4549
/* Invoke the signed/unsigned env-taking Neon helper for 'name',
 * selected by the in-scope variables 'size' (0/1/2) and 'u' (unsigned
 * flag), operating on the in-scope temporaries tmp/tmp2.  Expands to
 * 'return 1' (invalid encoding) for any other size/u combination, so
 * it is only usable inside an int-returning decode function.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.
 * Uses the in-scope 'size'/'u'/tmp/tmp2; expands to 'return 1' on an
 * invalid size/u combination.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4595
39d5492a 4596static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4597{
39d5492a 4598 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4599 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4600 return tmp;
9ee6e8bb
PB
4601}
4602
39d5492a 4603static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 4604{
dd8fbd78 4605 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 4606 tcg_temp_free_i32(var);
9ee6e8bb
PB
4607}
4608
/* Load a Neon scalar operand into a new temporary.
 * For size == 1 (16-bit scalars) 'reg' encodes D-register in bits [2:0],
 * pass in bit 4 and halfword-select in bit 3; the chosen halfword is
 * duplicated into both halves of the result.  Otherwise (32-bit) 'reg'
 * encodes D-register in bits [3:0] and pass in bit 4.
 * NOTE(review): encoding inferred from the masks below — confirm against
 * the caller's scalar-operand decode.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
4624
02acedf9 4625static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4626{
39d5492a 4627 TCGv_i32 tmp, tmp2;
600b828c 4628 if (!q && size == 2) {
02acedf9
PM
4629 return 1;
4630 }
4631 tmp = tcg_const_i32(rd);
4632 tmp2 = tcg_const_i32(rm);
4633 if (q) {
4634 switch (size) {
4635 case 0:
02da0b2d 4636 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4637 break;
4638 case 1:
02da0b2d 4639 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4640 break;
4641 case 2:
02da0b2d 4642 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4643 break;
4644 default:
4645 abort();
4646 }
4647 } else {
4648 switch (size) {
4649 case 0:
02da0b2d 4650 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4651 break;
4652 case 1:
02da0b2d 4653 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4654 break;
4655 default:
4656 abort();
4657 }
4658 }
4659 tcg_temp_free_i32(tmp);
4660 tcg_temp_free_i32(tmp2);
4661 return 0;
19457615
FN
4662}
4663
d68a6f3a 4664static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4665{
39d5492a 4666 TCGv_i32 tmp, tmp2;
600b828c 4667 if (!q && size == 2) {
d68a6f3a
PM
4668 return 1;
4669 }
4670 tmp = tcg_const_i32(rd);
4671 tmp2 = tcg_const_i32(rm);
4672 if (q) {
4673 switch (size) {
4674 case 0:
02da0b2d 4675 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4676 break;
4677 case 1:
02da0b2d 4678 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4679 break;
4680 case 2:
02da0b2d 4681 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4682 break;
4683 default:
4684 abort();
4685 }
4686 } else {
4687 switch (size) {
4688 case 0:
02da0b2d 4689 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4690 break;
4691 case 1:
02da0b2d 4692 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4693 break;
4694 default:
4695 abort();
4696 }
4697 }
4698 tcg_temp_free_i32(tmp);
4699 tcg_temp_free_i32(tmp2);
4700 return 0;
19457615
FN
4701}
4702
/* Transpose pairs of 8-bit elements between t0 and t1 (in place):
 *   new t0 = odd bytes of t0 shifted up | even bytes of t1
 *   new t1 = odd bytes of t1 shifted down | odd-byte lanes of t0
 * The two inputs are both read and both written, so t0 is staged in
 * 'rd' and only committed at the end.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 << 8) masked to odd byte lanes, merged with even lanes of t1 */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 >> 8) masked to even byte lanes, merged with odd lanes of t0 */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4724
/* Transpose the pair of 16-bit elements between t0 and t1 (in place):
 *   new t0 = low half of t1 | low half of t0 shifted to the top
 *   new t1 = high half of t1 shifted down | high half of t0
 * t0's replacement is staged in 'rd' because t0 is still read when
 * computing the new t1.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4743
4744
/* Layout descriptors for the Neon "load/store multiple structures"
 * forms, indexed by the insn's 'op' field (bits [11:8]): number of
 * registers transferred, element interleave factor, and D-register
 * spacing between consecutive structure members.
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
4762
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  Handles three encodings: load/store multiple
   structures, load single element to all lanes, and single-element
   load/store, plus the optional base-register writeback.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
        return 1;
    /* Common field decode: destination D-reg, base reg, index reg, L bit.  */
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Re-base the address for interleaved register groups.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Narrower elements: two 32-bit passes per D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one 32-bit lane.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit accesses packed into one 32-bit lane.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the selected lane.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means add
     * the transfer size, otherwise add index register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 5091
8f8e3aa4 5092/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 5093static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
5094{
5095 tcg_gen_and_i32(t, t, c);
f669df27 5096 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
5097 tcg_gen_or_i32(dest, t, f);
5098}
5099
39d5492a 5100static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5101{
5102 switch (size) {
5103 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5104 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5105 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5106 default: abort();
5107 }
5108}
5109
39d5492a 5110static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5111{
5112 switch (size) {
02da0b2d
PM
5113 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5114 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5115 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5116 default: abort();
5117 }
5118}
5119
39d5492a 5120static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5121{
5122 switch (size) {
02da0b2d
PM
5123 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5124 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5125 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5126 default: abort();
5127 }
5128}
5129
39d5492a 5130static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5131{
5132 switch (size) {
02da0b2d
PM
5133 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5134 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5135 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5136 default: abort();
5137 }
5138}
5139
39d5492a 5140static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5141 int q, int u)
5142{
5143 if (q) {
5144 if (u) {
5145 switch (size) {
5146 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5147 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5148 default: abort();
5149 }
5150 } else {
5151 switch (size) {
5152 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5153 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5154 default: abort();
5155 }
5156 }
5157 } else {
5158 if (u) {
5159 switch (size) {
b408a9b0
CL
5160 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5161 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5162 default: abort();
5163 }
5164 } else {
5165 switch (size) {
5166 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5167 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5168 default: abort();
5169 }
5170 }
5171 }
5172}
5173
39d5492a 5174static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5175{
5176 if (u) {
5177 switch (size) {
5178 case 0: gen_helper_neon_widen_u8(dest, src); break;
5179 case 1: gen_helper_neon_widen_u16(dest, src); break;
5180 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5181 default: abort();
5182 }
5183 } else {
5184 switch (size) {
5185 case 0: gen_helper_neon_widen_s8(dest, src); break;
5186 case 1: gen_helper_neon_widen_s16(dest, src); break;
5187 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5188 default: abort();
5189 }
5190 }
7d1b0095 5191 tcg_temp_free_i32(src);
ad69471c
PB
5192}
5193
5194static inline void gen_neon_addl(int size)
5195{
5196 switch (size) {
5197 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5198 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5199 case 2: tcg_gen_add_i64(CPU_V001); break;
5200 default: abort();
5201 }
5202}
5203
5204static inline void gen_neon_subl(int size)
5205{
5206 switch (size) {
5207 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5208 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5209 case 2: tcg_gen_sub_i64(CPU_V001); break;
5210 default: abort();
5211 }
5212}
5213
a7812ae4 5214static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5215{
5216 switch (size) {
5217 case 0: gen_helper_neon_negl_u16(var, var); break;
5218 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5219 case 2:
5220 tcg_gen_neg_i64(var, var);
5221 break;
ad69471c
PB
5222 default: abort();
5223 }
5224}
5225
a7812ae4 5226static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5227{
5228 switch (size) {
02da0b2d
PM
5229 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5230 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5231 default: abort();
5232 }
5233}
5234
39d5492a
PM
5235static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5236 int size, int u)
ad69471c 5237{
a7812ae4 5238 TCGv_i64 tmp;
ad69471c
PB
5239
5240 switch ((size << 1) | u) {
5241 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5242 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5243 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5244 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5245 case 4:
5246 tmp = gen_muls_i64_i32(a, b);
5247 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5248 tcg_temp_free_i64(tmp);
ad69471c
PB
5249 break;
5250 case 5:
5251 tmp = gen_mulu_i64_i32(a, b);
5252 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5253 tcg_temp_free_i64(tmp);
ad69471c
PB
5254 break;
5255 default: abort();
5256 }
c6067f04
CL
5257
5258 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5259 Don't forget to clean them now. */
5260 if (size < 2) {
7d1b0095
PM
5261 tcg_temp_free_i32(a);
5262 tcg_temp_free_i32(b);
c6067f04 5263 }
ad69471c
PB
5264}
5265
39d5492a
PM
5266static void gen_neon_narrow_op(int op, int u, int size,
5267 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5268{
5269 if (op) {
5270 if (u) {
5271 gen_neon_unarrow_sats(size, dest, src);
5272 } else {
5273 gen_neon_narrow(size, dest, src);
5274 }
5275 } else {
5276 if (u) {
5277 gen_neon_narrow_satu(size, dest, src);
5278 } else {
5279 gen_neon_narrow_sats(size, dest, src);
5280 }
5281 }
5282}
5283
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Each entry has bit n set if the op is valid for size value n; an op
 * with no bits set (e.g. an unallocated encoding) always UNDEFs.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
5355
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5422
5423static int neon_2rm_is_float_op(int op)
5424{
5425 /* Return true if this neon 2reg-misc op is float-to-float */
5426 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5427 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5428 op == NEON_2RM_VRINTM ||
5429 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5430 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5431}
5432
fe8fcf3d
PM
5433static bool neon_2rm_is_v8_op(int op)
5434{
5435 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5436 switch (op) {
5437 case NEON_2RM_VRINTN:
5438 case NEON_2RM_VRINTA:
5439 case NEON_2RM_VRINTM:
5440 case NEON_2RM_VRINTP:
5441 case NEON_2RM_VRINTZ:
5442 case NEON_2RM_VRINTX:
5443 case NEON_2RM_VCVTAU:
5444 case NEON_2RM_VCVTAS:
5445 case NEON_2RM_VCVTNU:
5446 case NEON_2RM_VCVTNS:
5447 case NEON_2RM_VCVTPU:
5448 case NEON_2RM_VCVTPS:
5449 case NEON_2RM_VCVTMU:
5450 case NEON_2RM_VCVTMS:
5451 return true;
5452 default:
5453 return false;
5454 }
5455}
5456
600b828c
PM
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5525
9ee6e8bb
PB
5526/* Translate a NEON data processing instruction. Return nonzero if the
5527 instruction is invalid.
ad69471c
PB
5528 We process data in a mixture of 32-bit and 64-bit chunks.
5529 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5530
7dcc1f89 5531static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5532{
5533 int op;
5534 int q;
5535 int rd, rn, rm;
5536 int size;
5537 int shift;
5538 int pass;
5539 int count;
5540 int pairwise;
5541 int u;
ca9a32e4 5542 uint32_t imm, mask;
39d5492a 5543 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5544 TCGv_i64 tmp64;
9ee6e8bb 5545
2c7ffc41
PM
5546 /* FIXME: this access check should not take precedence over UNDEF
5547 * for invalid encodings; we will generate incorrect syndrome information
5548 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5549 */
9dbbc748 5550 if (s->fp_excp_el) {
2c7ffc41 5551 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5552 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5553 return 0;
5554 }
5555
5df8bac1 5556 if (!s->vfp_enabled)
9ee6e8bb
PB
5557 return 1;
5558 q = (insn & (1 << 6)) != 0;
5559 u = (insn >> 24) & 1;
5560 VFP_DREG_D(rd, insn);
5561 VFP_DREG_N(rn, insn);
5562 VFP_DREG_M(rm, insn);
5563 size = (insn >> 20) & 3;
5564 if ((insn & (1 << 23)) == 0) {
5565 /* Three register same length. */
5566 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5567 /* Catch invalid op and bad size combinations: UNDEF */
5568 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5569 return 1;
5570 }
25f84f79
PM
5571 /* All insns of this form UNDEF for either this condition or the
5572 * superset of cases "Q==1"; we catch the latter later.
5573 */
5574 if (q && ((rd | rn | rm) & 1)) {
5575 return 1;
5576 }
f1ecb913
AB
5577 /*
5578 * The SHA-1/SHA-256 3-register instructions require special treatment
5579 * here, as their size field is overloaded as an op type selector, and
5580 * they all consume their input in a single pass.
5581 */
5582 if (op == NEON_3R_SHA) {
5583 if (!q) {
5584 return 1;
5585 }
5586 if (!u) { /* SHA-1 */
d614a513 5587 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5588 return 1;
5589 }
5590 tmp = tcg_const_i32(rd);
5591 tmp2 = tcg_const_i32(rn);
5592 tmp3 = tcg_const_i32(rm);
5593 tmp4 = tcg_const_i32(size);
5594 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5595 tcg_temp_free_i32(tmp4);
5596 } else { /* SHA-256 */
d614a513 5597 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5598 return 1;
5599 }
5600 tmp = tcg_const_i32(rd);
5601 tmp2 = tcg_const_i32(rn);
5602 tmp3 = tcg_const_i32(rm);
5603 switch (size) {
5604 case 0:
5605 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5606 break;
5607 case 1:
5608 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5609 break;
5610 case 2:
5611 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5612 break;
5613 }
5614 }
5615 tcg_temp_free_i32(tmp);
5616 tcg_temp_free_i32(tmp2);
5617 tcg_temp_free_i32(tmp3);
5618 return 0;
5619 }
62698be3
PM
5620 if (size == 3 && op != NEON_3R_LOGIC) {
5621 /* 64-bit element instructions. */
9ee6e8bb 5622 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5623 neon_load_reg64(cpu_V0, rn + pass);
5624 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5625 switch (op) {
62698be3 5626 case NEON_3R_VQADD:
9ee6e8bb 5627 if (u) {
02da0b2d
PM
5628 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5629 cpu_V0, cpu_V1);
2c0262af 5630 } else {
02da0b2d
PM
5631 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5632 cpu_V0, cpu_V1);
2c0262af 5633 }
9ee6e8bb 5634 break;
62698be3 5635 case NEON_3R_VQSUB:
9ee6e8bb 5636 if (u) {
02da0b2d
PM
5637 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5638 cpu_V0, cpu_V1);
ad69471c 5639 } else {
02da0b2d
PM
5640 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5641 cpu_V0, cpu_V1);
ad69471c
PB
5642 }
5643 break;
62698be3 5644 case NEON_3R_VSHL:
ad69471c
PB
5645 if (u) {
5646 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5647 } else {
5648 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5649 }
5650 break;
62698be3 5651 case NEON_3R_VQSHL:
ad69471c 5652 if (u) {
02da0b2d
PM
5653 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5654 cpu_V1, cpu_V0);
ad69471c 5655 } else {
02da0b2d
PM
5656 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5657 cpu_V1, cpu_V0);
ad69471c
PB
5658 }
5659 break;
62698be3 5660 case NEON_3R_VRSHL:
ad69471c
PB
5661 if (u) {
5662 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5663 } else {
ad69471c
PB
5664 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5665 }
5666 break;
62698be3 5667 case NEON_3R_VQRSHL:
ad69471c 5668 if (u) {
02da0b2d
PM
5669 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5670 cpu_V1, cpu_V0);
ad69471c 5671 } else {
02da0b2d
PM
5672 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5673 cpu_V1, cpu_V0);
1e8d4eec 5674 }
9ee6e8bb 5675 break;
62698be3 5676 case NEON_3R_VADD_VSUB:
9ee6e8bb 5677 if (u) {
ad69471c 5678 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5679 } else {
ad69471c 5680 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5681 }
5682 break;
5683 default:
5684 abort();
2c0262af 5685 }
ad69471c 5686 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5687 }
9ee6e8bb 5688 return 0;
2c0262af 5689 }
25f84f79 5690 pairwise = 0;
9ee6e8bb 5691 switch (op) {
62698be3
PM
5692 case NEON_3R_VSHL:
5693 case NEON_3R_VQSHL:
5694 case NEON_3R_VRSHL:
5695 case NEON_3R_VQRSHL:
9ee6e8bb 5696 {
ad69471c
PB
5697 int rtmp;
5698 /* Shift instruction operands are reversed. */
5699 rtmp = rn;
9ee6e8bb 5700 rn = rm;
ad69471c 5701 rm = rtmp;
9ee6e8bb 5702 }
2c0262af 5703 break;
25f84f79
PM
5704 case NEON_3R_VPADD:
5705 if (u) {
5706 return 1;
5707 }
5708 /* Fall through */
62698be3
PM
5709 case NEON_3R_VPMAX:
5710 case NEON_3R_VPMIN:
9ee6e8bb 5711 pairwise = 1;
2c0262af 5712 break;
25f84f79
PM
5713 case NEON_3R_FLOAT_ARITH:
5714 pairwise = (u && size < 2); /* if VPADD (float) */
5715 break;
5716 case NEON_3R_FLOAT_MINMAX:
5717 pairwise = u; /* if VPMIN/VPMAX (float) */
5718 break;
5719 case NEON_3R_FLOAT_CMP:
5720 if (!u && size) {
5721 /* no encoding for U=0 C=1x */
5722 return 1;
5723 }
5724 break;
5725 case NEON_3R_FLOAT_ACMP:
5726 if (!u) {
5727 return 1;
5728 }
5729 break;
505935fc
WN
5730 case NEON_3R_FLOAT_MISC:
5731 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5732 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5733 return 1;
5734 }
2c0262af 5735 break;
25f84f79
PM
5736 case NEON_3R_VMUL:
5737 if (u && (size != 0)) {
5738 /* UNDEF on invalid size for polynomial subcase */
5739 return 1;
5740 }
2c0262af 5741 break;
da97f52c 5742 case NEON_3R_VFM:
d614a513 5743 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5744 return 1;
5745 }
5746 break;
9ee6e8bb 5747 default:
2c0262af 5748 break;
9ee6e8bb 5749 }
dd8fbd78 5750
25f84f79
PM
5751 if (pairwise && q) {
5752 /* All the pairwise insns UNDEF if Q is set */
5753 return 1;
5754 }
5755
9ee6e8bb
PB
5756 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5757
5758 if (pairwise) {
5759 /* Pairwise. */
a5a14945
JR
5760 if (pass < 1) {
5761 tmp = neon_load_reg(rn, 0);
5762 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5763 } else {
a5a14945
JR
5764 tmp = neon_load_reg(rm, 0);
5765 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5766 }
5767 } else {
5768 /* Elementwise. */
dd8fbd78
FN
5769 tmp = neon_load_reg(rn, pass);
5770 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5771 }
5772 switch (op) {
62698be3 5773 case NEON_3R_VHADD:
9ee6e8bb
PB
5774 GEN_NEON_INTEGER_OP(hadd);
5775 break;
62698be3 5776 case NEON_3R_VQADD:
02da0b2d 5777 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5778 break;
62698be3 5779 case NEON_3R_VRHADD:
9ee6e8bb 5780 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5781 break;
62698be3 5782 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5783 switch ((u << 2) | size) {
5784 case 0: /* VAND */
dd8fbd78 5785 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5786 break;
5787 case 1: /* BIC */
f669df27 5788 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5789 break;
5790 case 2: /* VORR */
dd8fbd78 5791 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5792 break;
5793 case 3: /* VORN */
f669df27 5794 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5795 break;
5796 case 4: /* VEOR */
dd8fbd78 5797 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5798 break;
5799 case 5: /* VBSL */
dd8fbd78
FN
5800 tmp3 = neon_load_reg(rd, pass);
5801 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5802 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5803 break;
5804 case 6: /* VBIT */
dd8fbd78
FN
5805 tmp3 = neon_load_reg(rd, pass);
5806 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5807 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5808 break;
5809 case 7: /* VBIF */
dd8fbd78
FN
5810 tmp3 = neon_load_reg(rd, pass);
5811 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5812 tcg_temp_free_i32(tmp3);
9ee6e8bb 5813 break;
2c0262af
FB
5814 }
5815 break;
62698be3 5816 case NEON_3R_VHSUB:
9ee6e8bb
PB
5817 GEN_NEON_INTEGER_OP(hsub);
5818 break;
62698be3 5819 case NEON_3R_VQSUB:
02da0b2d 5820 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5821 break;
62698be3 5822 case NEON_3R_VCGT:
9ee6e8bb
PB
5823 GEN_NEON_INTEGER_OP(cgt);
5824 break;
62698be3 5825 case NEON_3R_VCGE:
9ee6e8bb
PB
5826 GEN_NEON_INTEGER_OP(cge);
5827 break;
62698be3 5828 case NEON_3R_VSHL:
ad69471c 5829 GEN_NEON_INTEGER_OP(shl);
2c0262af 5830 break;
62698be3 5831 case NEON_3R_VQSHL:
02da0b2d 5832 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5833 break;
62698be3 5834 case NEON_3R_VRSHL:
ad69471c 5835 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5836 break;
62698be3 5837 case NEON_3R_VQRSHL:
02da0b2d 5838 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5839 break;
62698be3 5840 case NEON_3R_VMAX:
9ee6e8bb
PB
5841 GEN_NEON_INTEGER_OP(max);
5842 break;
62698be3 5843 case NEON_3R_VMIN:
9ee6e8bb
PB
5844 GEN_NEON_INTEGER_OP(min);
5845 break;
62698be3 5846 case NEON_3R_VABD:
9ee6e8bb
PB
5847 GEN_NEON_INTEGER_OP(abd);
5848 break;
62698be3 5849 case NEON_3R_VABA:
9ee6e8bb 5850 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5851 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5852 tmp2 = neon_load_reg(rd, pass);
5853 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5854 break;
62698be3 5855 case NEON_3R_VADD_VSUB:
9ee6e8bb 5856 if (!u) { /* VADD */
62698be3 5857 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5858 } else { /* VSUB */
5859 switch (size) {
dd8fbd78
FN
5860 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5861 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5862 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5863 default: abort();
9ee6e8bb
PB
5864 }
5865 }
5866 break;
62698be3 5867 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5868 if (!u) { /* VTST */
5869 switch (size) {
dd8fbd78
FN
5870 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5871 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5872 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5873 default: abort();
9ee6e8bb
PB
5874 }
5875 } else { /* VCEQ */
5876 switch (size) {
dd8fbd78
FN
5877 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5878 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5879 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5880 default: abort();
9ee6e8bb
PB
5881 }
5882 }
5883 break;
62698be3 5884 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5885 switch (size) {
dd8fbd78
FN
5886 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5887 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5888 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5889 default: abort();
9ee6e8bb 5890 }
7d1b0095 5891 tcg_temp_free_i32(tmp2);
dd8fbd78 5892 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5893 if (u) { /* VMLS */
dd8fbd78 5894 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5895 } else { /* VMLA */
dd8fbd78 5896 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5897 }
5898 break;
62698be3 5899 case NEON_3R_VMUL:
9ee6e8bb 5900 if (u) { /* polynomial */
dd8fbd78 5901 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5902 } else { /* Integer */
5903 switch (size) {
dd8fbd78
FN
5904 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5905 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5906 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5907 default: abort();
9ee6e8bb
PB
5908 }
5909 }
5910 break;
62698be3 5911 case NEON_3R_VPMAX:
9ee6e8bb
PB
5912 GEN_NEON_INTEGER_OP(pmax);
5913 break;
62698be3 5914 case NEON_3R_VPMIN:
9ee6e8bb
PB
5915 GEN_NEON_INTEGER_OP(pmin);
5916 break;
62698be3 5917 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5918 if (!u) { /* VQDMULH */
5919 switch (size) {
02da0b2d
PM
5920 case 1:
5921 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5922 break;
5923 case 2:
5924 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5925 break;
62698be3 5926 default: abort();
9ee6e8bb 5927 }
62698be3 5928 } else { /* VQRDMULH */
9ee6e8bb 5929 switch (size) {
02da0b2d
PM
5930 case 1:
5931 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5932 break;
5933 case 2:
5934 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5935 break;
62698be3 5936 default: abort();
9ee6e8bb
PB
5937 }
5938 }
5939 break;
62698be3 5940 case NEON_3R_VPADD:
9ee6e8bb 5941 switch (size) {
dd8fbd78
FN
5942 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5943 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5944 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5945 default: abort();
9ee6e8bb
PB
5946 }
5947 break;
62698be3 5948 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5949 {
5950 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5951 switch ((u << 2) | size) {
5952 case 0: /* VADD */
aa47cfdd
PM
5953 case 4: /* VPADD */
5954 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5955 break;
5956 case 2: /* VSUB */
aa47cfdd 5957 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5958 break;
5959 case 6: /* VABD */
aa47cfdd 5960 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5961 break;
5962 default:
62698be3 5963 abort();
9ee6e8bb 5964 }
aa47cfdd 5965 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5966 break;
aa47cfdd 5967 }
62698be3 5968 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5969 {
5970 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5971 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5972 if (!u) {
7d1b0095 5973 tcg_temp_free_i32(tmp2);
dd8fbd78 5974 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5975 if (size == 0) {
aa47cfdd 5976 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5977 } else {
aa47cfdd 5978 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5979 }
5980 }
aa47cfdd 5981 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5982 break;
aa47cfdd 5983 }
62698be3 5984 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5985 {
5986 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5987 if (!u) {
aa47cfdd 5988 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5989 } else {
aa47cfdd
PM
5990 if (size == 0) {
5991 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5992 } else {
5993 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5994 }
b5ff1b31 5995 }
aa47cfdd 5996 tcg_temp_free_ptr(fpstatus);
2c0262af 5997 break;
aa47cfdd 5998 }
62698be3 5999 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
6000 {
6001 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6002 if (size == 0) {
6003 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
6004 } else {
6005 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
6006 }
6007 tcg_temp_free_ptr(fpstatus);
2c0262af 6008 break;
aa47cfdd 6009 }
62698be3 6010 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
6011 {
6012 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6013 if (size == 0) {
f71a2ae5 6014 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 6015 } else {
f71a2ae5 6016 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
6017 }
6018 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6019 break;
aa47cfdd 6020 }
505935fc
WN
6021 case NEON_3R_FLOAT_MISC:
6022 if (u) {
6023 /* VMAXNM/VMINNM */
6024 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6025 if (size == 0) {
f71a2ae5 6026 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 6027 } else {
f71a2ae5 6028 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
6029 }
6030 tcg_temp_free_ptr(fpstatus);
6031 } else {
6032 if (size == 0) {
6033 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
6034 } else {
6035 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
6036 }
6037 }
2c0262af 6038 break;
da97f52c
PM
6039 case NEON_3R_VFM:
6040 {
6041 /* VFMA, VFMS: fused multiply-add */
6042 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6043 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
6044 if (size) {
6045 /* VFMS */
6046 gen_helper_vfp_negs(tmp, tmp);
6047 }
6048 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
6049 tcg_temp_free_i32(tmp3);
6050 tcg_temp_free_ptr(fpstatus);
6051 break;
6052 }
9ee6e8bb
PB
6053 default:
6054 abort();
2c0262af 6055 }
7d1b0095 6056 tcg_temp_free_i32(tmp2);
dd8fbd78 6057
9ee6e8bb
PB
6058 /* Save the result. For elementwise operations we can put it
6059 straight into the destination register. For pairwise operations
6060 we have to be careful to avoid clobbering the source operands. */
6061 if (pairwise && rd == rm) {
dd8fbd78 6062 neon_store_scratch(pass, tmp);
9ee6e8bb 6063 } else {
dd8fbd78 6064 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6065 }
6066
6067 } /* for pass */
6068 if (pairwise && rd == rm) {
6069 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
6070 tmp = neon_load_scratch(pass);
6071 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6072 }
6073 }
ad69471c 6074 /* End of 3 register same size operations. */
9ee6e8bb
PB
6075 } else if (insn & (1 << 4)) {
6076 if ((insn & 0x00380080) != 0) {
6077 /* Two registers and shift. */
6078 op = (insn >> 8) & 0xf;
6079 if (insn & (1 << 7)) {
cc13115b
PM
6080 /* 64-bit shift. */
6081 if (op > 7) {
6082 return 1;
6083 }
9ee6e8bb
PB
6084 size = 3;
6085 } else {
6086 size = 2;
6087 while ((insn & (1 << (size + 19))) == 0)
6088 size--;
6089 }
6090 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6091 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6092 by immediate using the variable shift operations. */
6093 if (op < 8) {
6094 /* Shift by immediate:
6095 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6096 if (q && ((rd | rm) & 1)) {
6097 return 1;
6098 }
6099 if (!u && (op == 4 || op == 6)) {
6100 return 1;
6101 }
9ee6e8bb
PB
6102 /* Right shifts are encoded as N - shift, where N is the
6103 element size in bits. */
6104 if (op <= 4)
6105 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6106 if (size == 3) {
6107 count = q + 1;
6108 } else {
6109 count = q ? 4: 2;
6110 }
6111 switch (size) {
6112 case 0:
6113 imm = (uint8_t) shift;
6114 imm |= imm << 8;
6115 imm |= imm << 16;
6116 break;
6117 case 1:
6118 imm = (uint16_t) shift;
6119 imm |= imm << 16;
6120 break;
6121 case 2:
6122 case 3:
6123 imm = shift;
6124 break;
6125 default:
6126 abort();
6127 }
6128
6129 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6130 if (size == 3) {
6131 neon_load_reg64(cpu_V0, rm + pass);
6132 tcg_gen_movi_i64(cpu_V1, imm);
6133 switch (op) {
6134 case 0: /* VSHR */
6135 case 1: /* VSRA */
6136 if (u)
6137 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6138 else
ad69471c 6139 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6140 break;
ad69471c
PB
6141 case 2: /* VRSHR */
6142 case 3: /* VRSRA */
6143 if (u)
6144 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6145 else
ad69471c 6146 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6147 break;
ad69471c 6148 case 4: /* VSRI */
ad69471c
PB
6149 case 5: /* VSHL, VSLI */
6150 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6151 break;
0322b26e 6152 case 6: /* VQSHLU */
02da0b2d
PM
6153 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6154 cpu_V0, cpu_V1);
ad69471c 6155 break;
0322b26e
PM
6156 case 7: /* VQSHL */
6157 if (u) {
02da0b2d 6158 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6159 cpu_V0, cpu_V1);
6160 } else {
02da0b2d 6161 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6162 cpu_V0, cpu_V1);
6163 }
9ee6e8bb 6164 break;
9ee6e8bb 6165 }
ad69471c
PB
6166 if (op == 1 || op == 3) {
6167 /* Accumulate. */
5371cb81 6168 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6169 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6170 } else if (op == 4 || (op == 5 && u)) {
6171 /* Insert */
923e6509
CL
6172 neon_load_reg64(cpu_V1, rd + pass);
6173 uint64_t mask;
6174 if (shift < -63 || shift > 63) {
6175 mask = 0;
6176 } else {
6177 if (op == 4) {
6178 mask = 0xffffffffffffffffull >> -shift;
6179 } else {
6180 mask = 0xffffffffffffffffull << shift;
6181 }
6182 }
6183 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6184 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6185 }
6186 neon_store_reg64(cpu_V0, rd + pass);
6187 } else { /* size < 3 */
6188 /* Operands in T0 and T1. */
dd8fbd78 6189 tmp = neon_load_reg(rm, pass);
7d1b0095 6190 tmp2 = tcg_temp_new_i32();
dd8fbd78 6191 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6192 switch (op) {
6193 case 0: /* VSHR */
6194 case 1: /* VSRA */
6195 GEN_NEON_INTEGER_OP(shl);
6196 break;
6197 case 2: /* VRSHR */
6198 case 3: /* VRSRA */
6199 GEN_NEON_INTEGER_OP(rshl);
6200 break;
6201 case 4: /* VSRI */
ad69471c
PB
6202 case 5: /* VSHL, VSLI */
6203 switch (size) {
dd8fbd78
FN
6204 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6205 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6206 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6207 default: abort();
ad69471c
PB
6208 }
6209 break;
0322b26e 6210 case 6: /* VQSHLU */
ad69471c 6211 switch (size) {
0322b26e 6212 case 0:
02da0b2d
PM
6213 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6214 tmp, tmp2);
0322b26e
PM
6215 break;
6216 case 1:
02da0b2d
PM
6217 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6218 tmp, tmp2);
0322b26e
PM
6219 break;
6220 case 2:
02da0b2d
PM
6221 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6222 tmp, tmp2);
0322b26e
PM
6223 break;
6224 default:
cc13115b 6225 abort();
ad69471c
PB
6226 }
6227 break;
0322b26e 6228 case 7: /* VQSHL */
02da0b2d 6229 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6230 break;
ad69471c 6231 }
7d1b0095 6232 tcg_temp_free_i32(tmp2);
ad69471c
PB
6233
6234 if (op == 1 || op == 3) {
6235 /* Accumulate. */
dd8fbd78 6236 tmp2 = neon_load_reg(rd, pass);
5371cb81 6237 gen_neon_add(size, tmp, tmp2);
7d1b0095 6238 tcg_temp_free_i32(tmp2);
ad69471c
PB
6239 } else if (op == 4 || (op == 5 && u)) {
6240 /* Insert */
6241 switch (size) {
6242 case 0:
6243 if (op == 4)
ca9a32e4 6244 mask = 0xff >> -shift;
ad69471c 6245 else
ca9a32e4
JR
6246 mask = (uint8_t)(0xff << shift);
6247 mask |= mask << 8;
6248 mask |= mask << 16;
ad69471c
PB
6249 break;
6250 case 1:
6251 if (op == 4)
ca9a32e4 6252 mask = 0xffff >> -shift;
ad69471c 6253 else
ca9a32e4
JR
6254 mask = (uint16_t)(0xffff << shift);
6255 mask |= mask << 16;
ad69471c
PB
6256 break;
6257 case 2:
ca9a32e4
JR
6258 if (shift < -31 || shift > 31) {
6259 mask = 0;
6260 } else {
6261 if (op == 4)
6262 mask = 0xffffffffu >> -shift;
6263 else
6264 mask = 0xffffffffu << shift;
6265 }
ad69471c
PB
6266 break;
6267 default:
6268 abort();
6269 }
dd8fbd78 6270 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6271 tcg_gen_andi_i32(tmp, tmp, mask);
6272 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6273 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6274 tcg_temp_free_i32(tmp2);
ad69471c 6275 }
dd8fbd78 6276 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6277 }
6278 } /* for pass */
6279 } else if (op < 10) {
ad69471c 6280 /* Shift by immediate and narrow:
9ee6e8bb 6281 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6282 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6283 if (rm & 1) {
6284 return 1;
6285 }
9ee6e8bb
PB
6286 shift = shift - (1 << (size + 3));
6287 size++;
92cdfaeb 6288 if (size == 3) {
a7812ae4 6289 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6290 neon_load_reg64(cpu_V0, rm);
6291 neon_load_reg64(cpu_V1, rm + 1);
6292 for (pass = 0; pass < 2; pass++) {
6293 TCGv_i64 in;
6294 if (pass == 0) {
6295 in = cpu_V0;
6296 } else {
6297 in = cpu_V1;
6298 }
ad69471c 6299 if (q) {
0b36f4cd 6300 if (input_unsigned) {
92cdfaeb 6301 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6302 } else {
92cdfaeb 6303 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6304 }
ad69471c 6305 } else {
0b36f4cd 6306 if (input_unsigned) {
92cdfaeb 6307 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6308 } else {
92cdfaeb 6309 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6310 }
ad69471c 6311 }
7d1b0095 6312 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6313 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6314 neon_store_reg(rd, pass, tmp);
6315 } /* for pass */
6316 tcg_temp_free_i64(tmp64);
6317 } else {
6318 if (size == 1) {
6319 imm = (uint16_t)shift;
6320 imm |= imm << 16;
2c0262af 6321 } else {
92cdfaeb
PM
6322 /* size == 2 */
6323 imm = (uint32_t)shift;
6324 }
6325 tmp2 = tcg_const_i32(imm);
6326 tmp4 = neon_load_reg(rm + 1, 0);
6327 tmp5 = neon_load_reg(rm + 1, 1);
6328 for (pass = 0; pass < 2; pass++) {
6329 if (pass == 0) {
6330 tmp = neon_load_reg(rm, 0);
6331 } else {
6332 tmp = tmp4;
6333 }
0b36f4cd
CL
6334 gen_neon_shift_narrow(size, tmp, tmp2, q,
6335 input_unsigned);
92cdfaeb
PM
6336 if (pass == 0) {
6337 tmp3 = neon_load_reg(rm, 1);
6338 } else {
6339 tmp3 = tmp5;
6340 }
0b36f4cd
CL
6341 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6342 input_unsigned);
36aa55dc 6343 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6344 tcg_temp_free_i32(tmp);
6345 tcg_temp_free_i32(tmp3);
6346 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6347 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6348 neon_store_reg(rd, pass, tmp);
6349 } /* for pass */
c6067f04 6350 tcg_temp_free_i32(tmp2);
b75263d6 6351 }
9ee6e8bb 6352 } else if (op == 10) {
cc13115b
PM
6353 /* VSHLL, VMOVL */
6354 if (q || (rd & 1)) {
9ee6e8bb 6355 return 1;
cc13115b 6356 }
ad69471c
PB
6357 tmp = neon_load_reg(rm, 0);
6358 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6359 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6360 if (pass == 1)
6361 tmp = tmp2;
6362
6363 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6364
9ee6e8bb
PB
6365 if (shift != 0) {
6366 /* The shift is less than the width of the source
ad69471c
PB
6367 type, so we can just shift the whole register. */
6368 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
6369 /* Widen the result of shift: we need to clear
6370 * the potential overflow bits resulting from
6371 * left bits of the narrow input appearing as
6372 * right bits of left the neighbour narrow
6373 * input. */
ad69471c
PB
6374 if (size < 2 || !u) {
6375 uint64_t imm64;
6376 if (size == 0) {
6377 imm = (0xffu >> (8 - shift));
6378 imm |= imm << 16;
acdf01ef 6379 } else if (size == 1) {
ad69471c 6380 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6381 } else {
6382 /* size == 2 */
6383 imm = 0xffffffff >> (32 - shift);
6384 }
6385 if (size < 2) {
6386 imm64 = imm | (((uint64_t)imm) << 32);
6387 } else {
6388 imm64 = imm;
9ee6e8bb 6389 }
acdf01ef 6390 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6391 }
6392 }
ad69471c 6393 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6394 }
f73534a5 6395 } else if (op >= 14) {
9ee6e8bb 6396 /* VCVT fixed-point. */
cc13115b
PM
6397 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6398 return 1;
6399 }
f73534a5
PM
6400 /* We have already masked out the must-be-1 top bit of imm6,
6401 * hence this 32-shift where the ARM ARM has 64-imm6.
6402 */
6403 shift = 32 - shift;
9ee6e8bb 6404 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6405 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6406 if (!(op & 1)) {
9ee6e8bb 6407 if (u)
5500b06c 6408 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6409 else
5500b06c 6410 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6411 } else {
6412 if (u)
5500b06c 6413 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6414 else
5500b06c 6415 gen_vfp_tosl(0, shift, 1);
2c0262af 6416 }
4373f3ce 6417 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6418 }
6419 } else {
9ee6e8bb
PB
6420 return 1;
6421 }
6422 } else { /* (insn & 0x00380080) == 0 */
6423 int invert;
7d80fee5
PM
6424 if (q && (rd & 1)) {
6425 return 1;
6426 }
9ee6e8bb
PB
6427
6428 op = (insn >> 8) & 0xf;
6429 /* One register and immediate. */
6430 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6431 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6432 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6433 * We choose to not special-case this and will behave as if a
6434 * valid constant encoding of 0 had been given.
6435 */
9ee6e8bb
PB
6436 switch (op) {
6437 case 0: case 1:
6438 /* no-op */
6439 break;
6440 case 2: case 3:
6441 imm <<= 8;
6442 break;
6443 case 4: case 5:
6444 imm <<= 16;
6445 break;
6446 case 6: case 7:
6447 imm <<= 24;
6448 break;
6449 case 8: case 9:
6450 imm |= imm << 16;
6451 break;
6452 case 10: case 11:
6453 imm = (imm << 8) | (imm << 24);
6454 break;
6455 case 12:
8e31209e 6456 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6457 break;
6458 case 13:
6459 imm = (imm << 16) | 0xffff;
6460 break;
6461 case 14:
6462 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6463 if (invert)
6464 imm = ~imm;
6465 break;
6466 case 15:
7d80fee5
PM
6467 if (invert) {
6468 return 1;
6469 }
9ee6e8bb
PB
6470 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6471 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6472 break;
6473 }
6474 if (invert)
6475 imm = ~imm;
6476
9ee6e8bb
PB
6477 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6478 if (op & 1 && op < 12) {
ad69471c 6479 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6480 if (invert) {
6481 /* The immediate value has already been inverted, so
6482 BIC becomes AND. */
ad69471c 6483 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6484 } else {
ad69471c 6485 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6486 }
9ee6e8bb 6487 } else {
ad69471c 6488 /* VMOV, VMVN. */
7d1b0095 6489 tmp = tcg_temp_new_i32();
9ee6e8bb 6490 if (op == 14 && invert) {
a5a14945 6491 int n;
ad69471c
PB
6492 uint32_t val;
6493 val = 0;
9ee6e8bb
PB
6494 for (n = 0; n < 4; n++) {
6495 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6496 val |= 0xff << (n * 8);
9ee6e8bb 6497 }
ad69471c
PB
6498 tcg_gen_movi_i32(tmp, val);
6499 } else {
6500 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6501 }
9ee6e8bb 6502 }
ad69471c 6503 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6504 }
6505 }
e4b3861d 6506 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6507 if (size != 3) {
6508 op = (insn >> 8) & 0xf;
6509 if ((insn & (1 << 6)) == 0) {
6510 /* Three registers of different lengths. */
6511 int src1_wide;
6512 int src2_wide;
6513 int prewiden;
526d0096
PM
6514 /* undefreq: bit 0 : UNDEF if size == 0
6515 * bit 1 : UNDEF if size == 1
6516 * bit 2 : UNDEF if size == 2
6517 * bit 3 : UNDEF if U == 1
6518 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6519 */
6520 int undefreq;
6521 /* prewiden, src1_wide, src2_wide, undefreq */
6522 static const int neon_3reg_wide[16][4] = {
6523 {1, 0, 0, 0}, /* VADDL */
6524 {1, 1, 0, 0}, /* VADDW */
6525 {1, 0, 0, 0}, /* VSUBL */
6526 {1, 1, 0, 0}, /* VSUBW */
6527 {0, 1, 1, 0}, /* VADDHN */
6528 {0, 0, 0, 0}, /* VABAL */
6529 {0, 1, 1, 0}, /* VSUBHN */
6530 {0, 0, 0, 0}, /* VABDL */
6531 {0, 0, 0, 0}, /* VMLAL */
526d0096 6532 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6533 {0, 0, 0, 0}, /* VMLSL */
526d0096 6534 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6535 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6536 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6537 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6538 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6539 };
6540
6541 prewiden = neon_3reg_wide[op][0];
6542 src1_wide = neon_3reg_wide[op][1];
6543 src2_wide = neon_3reg_wide[op][2];
695272dc 6544 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6545
526d0096
PM
6546 if ((undefreq & (1 << size)) ||
6547 ((undefreq & 8) && u)) {
695272dc
PM
6548 return 1;
6549 }
6550 if ((src1_wide && (rn & 1)) ||
6551 (src2_wide && (rm & 1)) ||
6552 (!src2_wide && (rd & 1))) {
ad69471c 6553 return 1;
695272dc 6554 }
ad69471c 6555
4e624eda
PM
6556 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6557 * outside the loop below as it only performs a single pass.
6558 */
6559 if (op == 14 && size == 2) {
6560 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6561
d614a513 6562 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6563 return 1;
6564 }
6565 tcg_rn = tcg_temp_new_i64();
6566 tcg_rm = tcg_temp_new_i64();
6567 tcg_rd = tcg_temp_new_i64();
6568 neon_load_reg64(tcg_rn, rn);
6569 neon_load_reg64(tcg_rm, rm);
6570 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6571 neon_store_reg64(tcg_rd, rd);
6572 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6573 neon_store_reg64(tcg_rd, rd + 1);
6574 tcg_temp_free_i64(tcg_rn);
6575 tcg_temp_free_i64(tcg_rm);
6576 tcg_temp_free_i64(tcg_rd);
6577 return 0;
6578 }
6579
9ee6e8bb
PB
6580 /* Avoid overlapping operands. Wide source operands are
6581 always aligned so will never overlap with wide
6582 destinations in problematic ways. */
8f8e3aa4 6583 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6584 tmp = neon_load_reg(rm, 1);
6585 neon_store_scratch(2, tmp);
8f8e3aa4 6586 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6587 tmp = neon_load_reg(rn, 1);
6588 neon_store_scratch(2, tmp);
9ee6e8bb 6589 }
39d5492a 6590 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6591 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6592 if (src1_wide) {
6593 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6594 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6595 } else {
ad69471c 6596 if (pass == 1 && rd == rn) {
dd8fbd78 6597 tmp = neon_load_scratch(2);
9ee6e8bb 6598 } else {
ad69471c
PB
6599 tmp = neon_load_reg(rn, pass);
6600 }
6601 if (prewiden) {
6602 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6603 }
6604 }
ad69471c
PB
6605 if (src2_wide) {
6606 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6607 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6608 } else {
ad69471c 6609 if (pass == 1 && rd == rm) {
dd8fbd78 6610 tmp2 = neon_load_scratch(2);
9ee6e8bb 6611 } else {
ad69471c
PB
6612 tmp2 = neon_load_reg(rm, pass);
6613 }
6614 if (prewiden) {
6615 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6616 }
9ee6e8bb
PB
6617 }
6618 switch (op) {
6619 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6620 gen_neon_addl(size);
9ee6e8bb 6621 break;
79b0e534 6622 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6623 gen_neon_subl(size);
9ee6e8bb
PB
6624 break;
6625 case 5: case 7: /* VABAL, VABDL */
6626 switch ((size << 1) | u) {
ad69471c
PB
6627 case 0:
6628 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6629 break;
6630 case 1:
6631 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6632 break;
6633 case 2:
6634 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6635 break;
6636 case 3:
6637 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6638 break;
6639 case 4:
6640 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6641 break;
6642 case 5:
6643 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6644 break;
9ee6e8bb
PB
6645 default: abort();
6646 }
7d1b0095
PM
6647 tcg_temp_free_i32(tmp2);
6648 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6649 break;
6650 case 8: case 9: case 10: case 11: case 12: case 13:
6651 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6652 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6653 break;
6654 case 14: /* Polynomial VMULL */
e5ca24cb 6655 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6656 tcg_temp_free_i32(tmp2);
6657 tcg_temp_free_i32(tmp);
e5ca24cb 6658 break;
695272dc
PM
6659 default: /* 15 is RESERVED: caught earlier */
6660 abort();
9ee6e8bb 6661 }
ebcd88ce
PM
6662 if (op == 13) {
6663 /* VQDMULL */
6664 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6665 neon_store_reg64(cpu_V0, rd + pass);
6666 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6667 /* Accumulate. */
ebcd88ce 6668 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6669 switch (op) {
4dc064e6
PM
6670 case 10: /* VMLSL */
6671 gen_neon_negl(cpu_V0, size);
6672 /* Fall through */
6673 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6674 gen_neon_addl(size);
9ee6e8bb
PB
6675 break;
6676 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6677 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6678 if (op == 11) {
6679 gen_neon_negl(cpu_V0, size);
6680 }
ad69471c
PB
6681 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6682 break;
9ee6e8bb
PB
6683 default:
6684 abort();
6685 }
ad69471c 6686 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6687 } else if (op == 4 || op == 6) {
6688 /* Narrowing operation. */
7d1b0095 6689 tmp = tcg_temp_new_i32();
79b0e534 6690 if (!u) {
9ee6e8bb 6691 switch (size) {
ad69471c
PB
6692 case 0:
6693 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6694 break;
6695 case 1:
6696 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6697 break;
6698 case 2:
6699 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6700 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6701 break;
9ee6e8bb
PB
6702 default: abort();
6703 }
6704 } else {
6705 switch (size) {
ad69471c
PB
6706 case 0:
6707 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6708 break;
6709 case 1:
6710 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6711 break;
6712 case 2:
6713 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6714 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6715 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6716 break;
9ee6e8bb
PB
6717 default: abort();
6718 }
6719 }
ad69471c
PB
6720 if (pass == 0) {
6721 tmp3 = tmp;
6722 } else {
6723 neon_store_reg(rd, 0, tmp3);
6724 neon_store_reg(rd, 1, tmp);
6725 }
9ee6e8bb
PB
6726 } else {
6727 /* Write back the result. */
ad69471c 6728 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6729 }
6730 }
6731 } else {
3e3326df
PM
6732 /* Two registers and a scalar. NB that for ops of this form
6733 * the ARM ARM labels bit 24 as Q, but it is in our variable
6734 * 'u', not 'q'.
6735 */
6736 if (size == 0) {
6737 return 1;
6738 }
9ee6e8bb 6739 switch (op) {
9ee6e8bb 6740 case 1: /* Float VMLA scalar */
9ee6e8bb 6741 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6742 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6743 if (size == 1) {
6744 return 1;
6745 }
6746 /* fall through */
6747 case 0: /* Integer VMLA scalar */
6748 case 4: /* Integer VMLS scalar */
6749 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6750 case 12: /* VQDMULH scalar */
6751 case 13: /* VQRDMULH scalar */
3e3326df
PM
6752 if (u && ((rd | rn) & 1)) {
6753 return 1;
6754 }
dd8fbd78
FN
6755 tmp = neon_get_scalar(size, rm);
6756 neon_store_scratch(0, tmp);
9ee6e8bb 6757 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6758 tmp = neon_load_scratch(0);
6759 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6760 if (op == 12) {
6761 if (size == 1) {
02da0b2d 6762 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6763 } else {
02da0b2d 6764 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6765 }
6766 } else if (op == 13) {
6767 if (size == 1) {
02da0b2d 6768 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6769 } else {
02da0b2d 6770 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6771 }
6772 } else if (op & 1) {
aa47cfdd
PM
6773 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6774 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6775 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6776 } else {
6777 switch (size) {
dd8fbd78
FN
6778 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6779 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6780 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6781 default: abort();
9ee6e8bb
PB
6782 }
6783 }
7d1b0095 6784 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6785 if (op < 8) {
6786 /* Accumulate. */
dd8fbd78 6787 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6788 switch (op) {
6789 case 0:
dd8fbd78 6790 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6791 break;
6792 case 1:
aa47cfdd
PM
6793 {
6794 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6795 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6796 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6797 break;
aa47cfdd 6798 }
9ee6e8bb 6799 case 4:
dd8fbd78 6800 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6801 break;
6802 case 5:
aa47cfdd
PM
6803 {
6804 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6805 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6806 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6807 break;
aa47cfdd 6808 }
9ee6e8bb
PB
6809 default:
6810 abort();
6811 }
7d1b0095 6812 tcg_temp_free_i32(tmp2);
9ee6e8bb 6813 }
dd8fbd78 6814 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6815 }
6816 break;
9ee6e8bb 6817 case 3: /* VQDMLAL scalar */
9ee6e8bb 6818 case 7: /* VQDMLSL scalar */
9ee6e8bb 6819 case 11: /* VQDMULL scalar */
3e3326df 6820 if (u == 1) {
ad69471c 6821 return 1;
3e3326df
PM
6822 }
6823 /* fall through */
6824 case 2: /* VMLAL sclar */
6825 case 6: /* VMLSL scalar */
6826 case 10: /* VMULL scalar */
6827 if (rd & 1) {
6828 return 1;
6829 }
dd8fbd78 6830 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6831 /* We need a copy of tmp2 because gen_neon_mull
6832 * deletes it during pass 0. */
7d1b0095 6833 tmp4 = tcg_temp_new_i32();
c6067f04 6834 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6835 tmp3 = neon_load_reg(rn, 1);
ad69471c 6836
9ee6e8bb 6837 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6838 if (pass == 0) {
6839 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6840 } else {
dd8fbd78 6841 tmp = tmp3;
c6067f04 6842 tmp2 = tmp4;
9ee6e8bb 6843 }
ad69471c 6844 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6845 if (op != 11) {
6846 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6847 }
9ee6e8bb 6848 switch (op) {
4dc064e6
PM
6849 case 6:
6850 gen_neon_negl(cpu_V0, size);
6851 /* Fall through */
6852 case 2:
ad69471c 6853 gen_neon_addl(size);
9ee6e8bb
PB
6854 break;
6855 case 3: case 7:
ad69471c 6856 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6857 if (op == 7) {
6858 gen_neon_negl(cpu_V0, size);
6859 }
ad69471c 6860 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6861 break;
6862 case 10:
6863 /* no-op */
6864 break;
6865 case 11:
ad69471c 6866 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6867 break;
6868 default:
6869 abort();
6870 }
ad69471c 6871 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6872 }
dd8fbd78 6873
dd8fbd78 6874
9ee6e8bb
PB
6875 break;
6876 default: /* 14 and 15 are RESERVED */
6877 return 1;
6878 }
6879 }
6880 } else { /* size == 3 */
6881 if (!u) {
6882 /* Extract. */
9ee6e8bb 6883 imm = (insn >> 8) & 0xf;
ad69471c
PB
6884
6885 if (imm > 7 && !q)
6886 return 1;
6887
52579ea1
PM
6888 if (q && ((rd | rn | rm) & 1)) {
6889 return 1;
6890 }
6891
ad69471c
PB
6892 if (imm == 0) {
6893 neon_load_reg64(cpu_V0, rn);
6894 if (q) {
6895 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6896 }
ad69471c
PB
6897 } else if (imm == 8) {
6898 neon_load_reg64(cpu_V0, rn + 1);
6899 if (q) {
6900 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6901 }
ad69471c 6902 } else if (q) {
a7812ae4 6903 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6904 if (imm < 8) {
6905 neon_load_reg64(cpu_V0, rn);
a7812ae4 6906 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6907 } else {
6908 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6909 neon_load_reg64(tmp64, rm);
ad69471c
PB
6910 }
6911 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6912 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6913 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6914 if (imm < 8) {
6915 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6916 } else {
ad69471c
PB
6917 neon_load_reg64(cpu_V1, rm + 1);
6918 imm -= 8;
9ee6e8bb 6919 }
ad69471c 6920 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6921 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6922 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6923 tcg_temp_free_i64(tmp64);
ad69471c 6924 } else {
a7812ae4 6925 /* BUGFIX */
ad69471c 6926 neon_load_reg64(cpu_V0, rn);
a7812ae4 6927 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6928 neon_load_reg64(cpu_V1, rm);
a7812ae4 6929 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6930 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6931 }
6932 neon_store_reg64(cpu_V0, rd);
6933 if (q) {
6934 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6935 }
6936 } else if ((insn & (1 << 11)) == 0) {
6937 /* Two register misc. */
6938 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6939 size = (insn >> 18) & 3;
600b828c
PM
6940 /* UNDEF for unknown op values and bad op-size combinations */
6941 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6942 return 1;
6943 }
fe8fcf3d
PM
6944 if (neon_2rm_is_v8_op(op) &&
6945 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6946 return 1;
6947 }
fc2a9b37
PM
6948 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6949 q && ((rm | rd) & 1)) {
6950 return 1;
6951 }
9ee6e8bb 6952 switch (op) {
600b828c 6953 case NEON_2RM_VREV64:
9ee6e8bb 6954 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6955 tmp = neon_load_reg(rm, pass * 2);
6956 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6957 switch (size) {
dd8fbd78
FN
6958 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6959 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6960 case 2: /* no-op */ break;
6961 default: abort();
6962 }
dd8fbd78 6963 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6964 if (size == 2) {
dd8fbd78 6965 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6966 } else {
9ee6e8bb 6967 switch (size) {
dd8fbd78
FN
6968 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6969 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6970 default: abort();
6971 }
dd8fbd78 6972 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6973 }
6974 }
6975 break;
600b828c
PM
6976 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6977 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6978 for (pass = 0; pass < q + 1; pass++) {
6979 tmp = neon_load_reg(rm, pass * 2);
6980 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6981 tmp = neon_load_reg(rm, pass * 2 + 1);
6982 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6983 switch (size) {
6984 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6985 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6986 case 2: tcg_gen_add_i64(CPU_V001); break;
6987 default: abort();
6988 }
600b828c 6989 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6990 /* Accumulate. */
ad69471c
PB
6991 neon_load_reg64(cpu_V1, rd + pass);
6992 gen_neon_addl(size);
9ee6e8bb 6993 }
ad69471c 6994 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6995 }
6996 break;
600b828c 6997 case NEON_2RM_VTRN:
9ee6e8bb 6998 if (size == 2) {
a5a14945 6999 int n;
9ee6e8bb 7000 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
7001 tmp = neon_load_reg(rm, n);
7002 tmp2 = neon_load_reg(rd, n + 1);
7003 neon_store_reg(rm, n, tmp2);
7004 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
7005 }
7006 } else {
7007 goto elementwise;
7008 }
7009 break;
600b828c 7010 case NEON_2RM_VUZP:
02acedf9 7011 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 7012 return 1;
9ee6e8bb
PB
7013 }
7014 break;
600b828c 7015 case NEON_2RM_VZIP:
d68a6f3a 7016 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 7017 return 1;
9ee6e8bb
PB
7018 }
7019 break;
600b828c
PM
7020 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
7021 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
7022 if (rm & 1) {
7023 return 1;
7024 }
39d5492a 7025 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 7026 for (pass = 0; pass < 2; pass++) {
ad69471c 7027 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 7028 tmp = tcg_temp_new_i32();
600b828c
PM
7029 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
7030 tmp, cpu_V0);
ad69471c
PB
7031 if (pass == 0) {
7032 tmp2 = tmp;
7033 } else {
7034 neon_store_reg(rd, 0, tmp2);
7035 neon_store_reg(rd, 1, tmp);
9ee6e8bb 7036 }
9ee6e8bb
PB
7037 }
7038 break;
600b828c 7039 case NEON_2RM_VSHLL:
fc2a9b37 7040 if (q || (rd & 1)) {
9ee6e8bb 7041 return 1;
600b828c 7042 }
ad69471c
PB
7043 tmp = neon_load_reg(rm, 0);
7044 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 7045 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
7046 if (pass == 1)
7047 tmp = tmp2;
7048 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 7049 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 7050 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
7051 }
7052 break;
600b828c 7053 case NEON_2RM_VCVT_F16_F32:
d614a513 7054 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7055 q || (rm & 1)) {
7056 return 1;
7057 }
7d1b0095
PM
7058 tmp = tcg_temp_new_i32();
7059 tmp2 = tcg_temp_new_i32();
60011498 7060 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 7061 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 7062 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 7063 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7064 tcg_gen_shli_i32(tmp2, tmp2, 16);
7065 tcg_gen_or_i32(tmp2, tmp2, tmp);
7066 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 7067 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
7068 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
7069 neon_store_reg(rd, 0, tmp2);
7d1b0095 7070 tmp2 = tcg_temp_new_i32();
2d981da7 7071 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
7072 tcg_gen_shli_i32(tmp2, tmp2, 16);
7073 tcg_gen_or_i32(tmp2, tmp2, tmp);
7074 neon_store_reg(rd, 1, tmp2);
7d1b0095 7075 tcg_temp_free_i32(tmp);
60011498 7076 break;
600b828c 7077 case NEON_2RM_VCVT_F32_F16:
d614a513 7078 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
7079 q || (rd & 1)) {
7080 return 1;
7081 }
7d1b0095 7082 tmp3 = tcg_temp_new_i32();
60011498
PB
7083 tmp = neon_load_reg(rm, 0);
7084 tmp2 = neon_load_reg(rm, 1);
7085 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7086 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7087 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7088 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7089 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7090 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7091 tcg_temp_free_i32(tmp);
60011498 7092 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7093 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7094 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7095 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7096 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7097 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7098 tcg_temp_free_i32(tmp2);
7099 tcg_temp_free_i32(tmp3);
60011498 7100 break;
9d935509 7101 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7102 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7103 || ((rm | rd) & 1)) {
7104 return 1;
7105 }
7106 tmp = tcg_const_i32(rd);
7107 tmp2 = tcg_const_i32(rm);
7108
7109 /* Bit 6 is the lowest opcode bit; it distinguishes between
7110 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7111 */
7112 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7113
7114 if (op == NEON_2RM_AESE) {
7115 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7116 } else {
7117 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7118 }
7119 tcg_temp_free_i32(tmp);
7120 tcg_temp_free_i32(tmp2);
7121 tcg_temp_free_i32(tmp3);
7122 break;
f1ecb913 7123 case NEON_2RM_SHA1H:
d614a513 7124 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7125 || ((rm | rd) & 1)) {
7126 return 1;
7127 }
7128 tmp = tcg_const_i32(rd);
7129 tmp2 = tcg_const_i32(rm);
7130
7131 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7132
7133 tcg_temp_free_i32(tmp);
7134 tcg_temp_free_i32(tmp2);
7135 break;
7136 case NEON_2RM_SHA1SU1:
7137 if ((rm | rd) & 1) {
7138 return 1;
7139 }
7140 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7141 if (q) {
d614a513 7142 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7143 return 1;
7144 }
d614a513 7145 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7146 return 1;
7147 }
7148 tmp = tcg_const_i32(rd);
7149 tmp2 = tcg_const_i32(rm);
7150 if (q) {
7151 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7152 } else {
7153 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7154 }
7155 tcg_temp_free_i32(tmp);
7156 tcg_temp_free_i32(tmp2);
7157 break;
9ee6e8bb
PB
7158 default:
7159 elementwise:
7160 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7161 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7162 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7163 neon_reg_offset(rm, pass));
39d5492a 7164 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7165 } else {
dd8fbd78 7166 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7167 }
7168 switch (op) {
600b828c 7169 case NEON_2RM_VREV32:
9ee6e8bb 7170 switch (size) {
dd8fbd78
FN
7171 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7172 case 1: gen_swap_half(tmp); break;
600b828c 7173 default: abort();
9ee6e8bb
PB
7174 }
7175 break;
600b828c 7176 case NEON_2RM_VREV16:
dd8fbd78 7177 gen_rev16(tmp);
9ee6e8bb 7178 break;
600b828c 7179 case NEON_2RM_VCLS:
9ee6e8bb 7180 switch (size) {
dd8fbd78
FN
7181 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7182 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7183 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7184 default: abort();
9ee6e8bb
PB
7185 }
7186 break;
600b828c 7187 case NEON_2RM_VCLZ:
9ee6e8bb 7188 switch (size) {
dd8fbd78
FN
7189 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7190 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7191 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7192 default: abort();
9ee6e8bb
PB
7193 }
7194 break;
600b828c 7195 case NEON_2RM_VCNT:
dd8fbd78 7196 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7197 break;
600b828c 7198 case NEON_2RM_VMVN:
dd8fbd78 7199 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7200 break;
600b828c 7201 case NEON_2RM_VQABS:
9ee6e8bb 7202 switch (size) {
02da0b2d
PM
7203 case 0:
7204 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7205 break;
7206 case 1:
7207 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7208 break;
7209 case 2:
7210 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7211 break;
600b828c 7212 default: abort();
9ee6e8bb
PB
7213 }
7214 break;
600b828c 7215 case NEON_2RM_VQNEG:
9ee6e8bb 7216 switch (size) {
02da0b2d
PM
7217 case 0:
7218 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7219 break;
7220 case 1:
7221 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7222 break;
7223 case 2:
7224 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7225 break;
600b828c 7226 default: abort();
9ee6e8bb
PB
7227 }
7228 break;
600b828c 7229 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7230 tmp2 = tcg_const_i32(0);
9ee6e8bb 7231 switch(size) {
dd8fbd78
FN
7232 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7233 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7234 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7235 default: abort();
9ee6e8bb 7236 }
39d5492a 7237 tcg_temp_free_i32(tmp2);
600b828c 7238 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7239 tcg_gen_not_i32(tmp, tmp);
600b828c 7240 }
9ee6e8bb 7241 break;
600b828c 7242 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7243 tmp2 = tcg_const_i32(0);
9ee6e8bb 7244 switch(size) {
dd8fbd78
FN
7245 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7246 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7247 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7248 default: abort();
9ee6e8bb 7249 }
39d5492a 7250 tcg_temp_free_i32(tmp2);
600b828c 7251 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7252 tcg_gen_not_i32(tmp, tmp);
600b828c 7253 }
9ee6e8bb 7254 break;
600b828c 7255 case NEON_2RM_VCEQ0:
dd8fbd78 7256 tmp2 = tcg_const_i32(0);
9ee6e8bb 7257 switch(size) {
dd8fbd78
FN
7258 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7259 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7260 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7261 default: abort();
9ee6e8bb 7262 }
39d5492a 7263 tcg_temp_free_i32(tmp2);
9ee6e8bb 7264 break;
600b828c 7265 case NEON_2RM_VABS:
9ee6e8bb 7266 switch(size) {
dd8fbd78
FN
7267 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7268 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7269 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7270 default: abort();
9ee6e8bb
PB
7271 }
7272 break;
600b828c 7273 case NEON_2RM_VNEG:
dd8fbd78
FN
7274 tmp2 = tcg_const_i32(0);
7275 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7276 tcg_temp_free_i32(tmp2);
9ee6e8bb 7277 break;
600b828c 7278 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7279 {
7280 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7281 tmp2 = tcg_const_i32(0);
aa47cfdd 7282 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7283 tcg_temp_free_i32(tmp2);
aa47cfdd 7284 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7285 break;
aa47cfdd 7286 }
600b828c 7287 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7288 {
7289 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7290 tmp2 = tcg_const_i32(0);
aa47cfdd 7291 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7292 tcg_temp_free_i32(tmp2);
aa47cfdd 7293 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7294 break;
aa47cfdd 7295 }
600b828c 7296 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7297 {
7298 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7299 tmp2 = tcg_const_i32(0);
aa47cfdd 7300 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7301 tcg_temp_free_i32(tmp2);
aa47cfdd 7302 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7303 break;
aa47cfdd 7304 }
600b828c 7305 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7306 {
7307 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7308 tmp2 = tcg_const_i32(0);
aa47cfdd 7309 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7310 tcg_temp_free_i32(tmp2);
aa47cfdd 7311 tcg_temp_free_ptr(fpstatus);
0e326109 7312 break;
aa47cfdd 7313 }
600b828c 7314 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7315 {
7316 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7317 tmp2 = tcg_const_i32(0);
aa47cfdd 7318 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7319 tcg_temp_free_i32(tmp2);
aa47cfdd 7320 tcg_temp_free_ptr(fpstatus);
0e326109 7321 break;
aa47cfdd 7322 }
600b828c 7323 case NEON_2RM_VABS_F:
4373f3ce 7324 gen_vfp_abs(0);
9ee6e8bb 7325 break;
600b828c 7326 case NEON_2RM_VNEG_F:
4373f3ce 7327 gen_vfp_neg(0);
9ee6e8bb 7328 break;
600b828c 7329 case NEON_2RM_VSWP:
dd8fbd78
FN
7330 tmp2 = neon_load_reg(rd, pass);
7331 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7332 break;
600b828c 7333 case NEON_2RM_VTRN:
dd8fbd78 7334 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7335 switch (size) {
dd8fbd78
FN
7336 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7337 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7338 default: abort();
9ee6e8bb 7339 }
dd8fbd78 7340 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7341 break;
34f7b0a2
WN
7342 case NEON_2RM_VRINTN:
7343 case NEON_2RM_VRINTA:
7344 case NEON_2RM_VRINTM:
7345 case NEON_2RM_VRINTP:
7346 case NEON_2RM_VRINTZ:
7347 {
7348 TCGv_i32 tcg_rmode;
7349 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7350 int rmode;
7351
7352 if (op == NEON_2RM_VRINTZ) {
7353 rmode = FPROUNDING_ZERO;
7354 } else {
7355 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7356 }
7357
7358 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7359 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7360 cpu_env);
7361 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7362 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7363 cpu_env);
7364 tcg_temp_free_ptr(fpstatus);
7365 tcg_temp_free_i32(tcg_rmode);
7366 break;
7367 }
2ce70625
WN
7368 case NEON_2RM_VRINTX:
7369 {
7370 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7371 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7372 tcg_temp_free_ptr(fpstatus);
7373 break;
7374 }
901ad525
WN
7375 case NEON_2RM_VCVTAU:
7376 case NEON_2RM_VCVTAS:
7377 case NEON_2RM_VCVTNU:
7378 case NEON_2RM_VCVTNS:
7379 case NEON_2RM_VCVTPU:
7380 case NEON_2RM_VCVTPS:
7381 case NEON_2RM_VCVTMU:
7382 case NEON_2RM_VCVTMS:
7383 {
7384 bool is_signed = !extract32(insn, 7, 1);
7385 TCGv_ptr fpst = get_fpstatus_ptr(1);
7386 TCGv_i32 tcg_rmode, tcg_shift;
7387 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7388
7389 tcg_shift = tcg_const_i32(0);
7390 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7391 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7392 cpu_env);
7393
7394 if (is_signed) {
7395 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7396 tcg_shift, fpst);
7397 } else {
7398 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7399 tcg_shift, fpst);
7400 }
7401
7402 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7403 cpu_env);
7404 tcg_temp_free_i32(tcg_rmode);
7405 tcg_temp_free_i32(tcg_shift);
7406 tcg_temp_free_ptr(fpst);
7407 break;
7408 }
600b828c 7409 case NEON_2RM_VRECPE:
b6d4443a
AB
7410 {
7411 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7412 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7413 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7414 break;
b6d4443a 7415 }
600b828c 7416 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7417 {
7418 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7419 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7420 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7421 break;
c2fb418e 7422 }
600b828c 7423 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7424 {
7425 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7426 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7427 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7428 break;
b6d4443a 7429 }
600b828c 7430 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7431 {
7432 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7433 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7434 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7435 break;
c2fb418e 7436 }
600b828c 7437 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7438 gen_vfp_sito(0, 1);
9ee6e8bb 7439 break;
600b828c 7440 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7441 gen_vfp_uito(0, 1);
9ee6e8bb 7442 break;
600b828c 7443 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7444 gen_vfp_tosiz(0, 1);
9ee6e8bb 7445 break;
600b828c 7446 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7447 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7448 break;
7449 default:
600b828c
PM
7450 /* Reserved op values were caught by the
7451 * neon_2rm_sizes[] check earlier.
7452 */
7453 abort();
9ee6e8bb 7454 }
600b828c 7455 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7456 tcg_gen_st_f32(cpu_F0s, cpu_env,
7457 neon_reg_offset(rd, pass));
9ee6e8bb 7458 } else {
dd8fbd78 7459 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7460 }
7461 }
7462 break;
7463 }
7464 } else if ((insn & (1 << 10)) == 0) {
7465 /* VTBL, VTBX. */
56907d77
PM
7466 int n = ((insn >> 8) & 3) + 1;
7467 if ((rn + n) > 32) {
7468 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7469 * helper function running off the end of the register file.
7470 */
7471 return 1;
7472 }
7473 n <<= 3;
9ee6e8bb 7474 if (insn & (1 << 6)) {
8f8e3aa4 7475 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7476 } else {
7d1b0095 7477 tmp = tcg_temp_new_i32();
8f8e3aa4 7478 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7479 }
8f8e3aa4 7480 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7481 tmp4 = tcg_const_i32(rn);
7482 tmp5 = tcg_const_i32(n);
9ef39277 7483 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7484 tcg_temp_free_i32(tmp);
9ee6e8bb 7485 if (insn & (1 << 6)) {
8f8e3aa4 7486 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7487 } else {
7d1b0095 7488 tmp = tcg_temp_new_i32();
8f8e3aa4 7489 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7490 }
8f8e3aa4 7491 tmp3 = neon_load_reg(rm, 1);
9ef39277 7492 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7493 tcg_temp_free_i32(tmp5);
7494 tcg_temp_free_i32(tmp4);
8f8e3aa4 7495 neon_store_reg(rd, 0, tmp2);
3018f259 7496 neon_store_reg(rd, 1, tmp3);
7d1b0095 7497 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7498 } else if ((insn & 0x380) == 0) {
7499 /* VDUP */
133da6aa
JR
7500 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7501 return 1;
7502 }
9ee6e8bb 7503 if (insn & (1 << 19)) {
dd8fbd78 7504 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7505 } else {
dd8fbd78 7506 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7507 }
7508 if (insn & (1 << 16)) {
dd8fbd78 7509 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7510 } else if (insn & (1 << 17)) {
7511 if ((insn >> 18) & 1)
dd8fbd78 7512 gen_neon_dup_high16(tmp);
9ee6e8bb 7513 else
dd8fbd78 7514 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7515 }
7516 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7517 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7518 tcg_gen_mov_i32(tmp2, tmp);
7519 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7520 }
7d1b0095 7521 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7522 } else {
7523 return 1;
7524 }
7525 }
7526 }
7527 return 0;
7528}
7529
/* Decode and emit code for a coprocessor instruction (MRC/MCR/MRRC/MCRR
 * and the XScale/iwMMXt coprocessor spaces).
 *
 * @s: disassembly context for the instruction being translated
 * @insn: the 32-bit instruction word
 *
 * Returns 0 if the instruction was handled (code emitted), 1 if it
 * should be treated as UNDEFINED by the caller.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* cp0/cp1 access gated by the per-coprocessor CPAR enable bit */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-access encoding fields. 64-bit (MRRC/MCRR)
     * accesses have no crn/opc2 and carry a second transfer register.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    /* Look up the register in the translation-time hashtable of
     * implemented cp registers for this CPU.
     */
    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec state and PC so a runtime trap reports the
             * correct instruction address.
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* Registers marked ARM_CP_IO must be bracketed by icount start/end
         * when instruction counting is in use.
         */
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                /* Source the 64-bit value: compile-time constant, helper
                 * readfn, or a direct load from the CPU state field.
                 */
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split into the rt (low half) / rt2 (high half) pair */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                /* Concatenate rt:rt2 into a 64-bit value before writing */
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7781
5e3f878a
PB
7782
7783/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7784static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7785{
39d5492a 7786 TCGv_i32 tmp;
7d1b0095 7787 tmp = tcg_temp_new_i32();
ecc7b3aa 7788 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7789 store_reg(s, rlow, tmp);
7d1b0095 7790 tmp = tcg_temp_new_i32();
5e3f878a 7791 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7792 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7793 store_reg(s, rhigh, tmp);
7794}
7795
7796/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7797static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7798{
a7812ae4 7799 TCGv_i64 tmp;
39d5492a 7800 TCGv_i32 tmp2;
5e3f878a 7801
36aa55dc 7802 /* Load value and extend to 64 bits. */
a7812ae4 7803 tmp = tcg_temp_new_i64();
5e3f878a
PB
7804 tmp2 = load_reg(s, rlow);
7805 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7806 tcg_temp_free_i32(tmp2);
5e3f878a 7807 tcg_gen_add_i64(val, val, tmp);
b75263d6 7808 tcg_temp_free_i64(tmp);
5e3f878a
PB
7809}
7810
7811/* load and add a 64-bit value from a register pair. */
a7812ae4 7812static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7813{
a7812ae4 7814 TCGv_i64 tmp;
39d5492a
PM
7815 TCGv_i32 tmpl;
7816 TCGv_i32 tmph;
5e3f878a
PB
7817
7818 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7819 tmpl = load_reg(s, rlow);
7820 tmph = load_reg(s, rhigh);
a7812ae4 7821 tmp = tcg_temp_new_i64();
36aa55dc 7822 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7823 tcg_temp_free_i32(tmpl);
7824 tcg_temp_free_i32(tmph);
5e3f878a 7825 tcg_gen_add_i64(val, val, tmp);
b75263d6 7826 tcg_temp_free_i64(tmp);
5e3f878a
PB
7827}
7828
c9f10124 7829/* Set N and Z flags from hi|lo. */
39d5492a 7830static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7831{
c9f10124
RH
7832 tcg_gen_mov_i32(cpu_NF, hi);
7833 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7834}
7835
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */
/* Emit code for LDREX/LDREXB/LDREXH/LDREXD:
 * @rt: destination register; @rt2: second destination (size == 3 only)
 * @addr: TCG value holding the guest address; @size: log2 of access size
 * (3 means a 64-bit doubleword access into the rt/rt2 pair).
 * Records the loaded value/address in cpu_exclusive_val/cpu_exclusive_addr
 * for the matching gen_store_exclusive to check.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* MO_ALIGN: exclusives require aligned addresses */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* One 64-bit load, remembered whole, then split into rt/rt2 */
        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the exclusive monitor with the (zero-extended) address */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7868
/* Emit code for CLREX: clear the exclusive monitor by setting the
 * remembered exclusive address to -1, which can never match a real
 * (zero-extended 32-bit) address armed by gen_load_exclusive.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7873
/* Emit code for STREX/STREXB/STREXH/STREXD:
 * @rd: status register (0 on success, 1 on failure)
 * @rt: source register; @rt2: second source (size == 3 only)
 * @addr: TCG value holding the guest address; @size: log2 of access size.
 * The store only succeeds if @addr matches the remembered exclusive
 * address and the memory still holds the remembered exclusive value;
 * the value check is folded into an atomic cmpxchg.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address check is done manually (the cmpxchg only checks the value) */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        /* Build the new 64-bit value from the rt:rt2 pair */
        t2 = load_reg(s, rt2);
        tcg_gen_concat_i32_i64(n64, t1, t2);
        tcg_temp_free_i32(t2);
        gen_aa32_frob64(s, n64);

        /* Atomically: store n64 iff memory still holds exclusive_val;
         * o64 receives the value that was actually in memory.
         */
        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        gen_aa32_frob64(s, o64);
        /* t0 = 0 if the old value matched (store happened), else 1 */
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    fail_label:
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is now clear */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 7936
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn, which stores
 * LR and SPSR of the current mode to the banked SP of @mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3 (target_el argument 3) */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the requested mode number */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the lower of the two words to store, relative to the
     * banked SP, for each addressing mode.
     */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at the next word up */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust addr (currently lower-word address + 4) to the final
         * SP value for this addressing mode before writing it back.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    /* CPU state (banked regs) may have changed; end the TB */
    s->is_jmp = DISAS_UPDATE;
}
8063
f4df2210 8064static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 8065{
f4df2210 8066 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
8067 TCGv_i32 tmp;
8068 TCGv_i32 tmp2;
8069 TCGv_i32 tmp3;
8070 TCGv_i32 addr;
a7812ae4 8071 TCGv_i64 tmp64;
9ee6e8bb 8072
e13886e3
PM
8073 /* M variants do not implement ARM mode; this must raise the INVSTATE
8074 * UsageFault exception.
8075 */
b53d8923 8076 if (arm_dc_feature(s, ARM_FEATURE_M)) {
e13886e3
PM
8077 gen_exception_insn(s, 4, EXCP_INVSTATE, syn_uncategorized(),
8078 default_exception_el(s));
8079 return;
b53d8923 8080 }
9ee6e8bb
PB
8081 cond = insn >> 28;
8082 if (cond == 0xf){
be5e7a76
DES
8083 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8084 * choose to UNDEF. In ARMv5 and above the space is used
8085 * for miscellaneous unconditional instructions.
8086 */
8087 ARCH(5);
8088
9ee6e8bb
PB
8089 /* Unconditional instructions. */
8090 if (((insn >> 25) & 7) == 1) {
8091 /* NEON Data processing. */
d614a513 8092 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8093 goto illegal_op;
d614a513 8094 }
9ee6e8bb 8095
7dcc1f89 8096 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8097 goto illegal_op;
7dcc1f89 8098 }
9ee6e8bb
PB
8099 return;
8100 }
8101 if ((insn & 0x0f100000) == 0x04000000) {
8102 /* NEON load/store. */
d614a513 8103 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8104 goto illegal_op;
d614a513 8105 }
9ee6e8bb 8106
7dcc1f89 8107 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8108 goto illegal_op;
7dcc1f89 8109 }
9ee6e8bb
PB
8110 return;
8111 }
6a57f3eb
WN
8112 if ((insn & 0x0f000e10) == 0x0e000a00) {
8113 /* VFP. */
7dcc1f89 8114 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8115 goto illegal_op;
8116 }
8117 return;
8118 }
3d185e5d
PM
8119 if (((insn & 0x0f30f000) == 0x0510f000) ||
8120 ((insn & 0x0f30f010) == 0x0710f000)) {
8121 if ((insn & (1 << 22)) == 0) {
8122 /* PLDW; v7MP */
d614a513 8123 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8124 goto illegal_op;
8125 }
8126 }
8127 /* Otherwise PLD; v5TE+ */
be5e7a76 8128 ARCH(5TE);
3d185e5d
PM
8129 return;
8130 }
8131 if (((insn & 0x0f70f000) == 0x0450f000) ||
8132 ((insn & 0x0f70f010) == 0x0650f000)) {
8133 ARCH(7);
8134 return; /* PLI; V7 */
8135 }
8136 if (((insn & 0x0f700000) == 0x04100000) ||
8137 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8138 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8139 goto illegal_op;
8140 }
8141 return; /* v7MP: Unallocated memory hint: must NOP */
8142 }
8143
8144 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8145 ARCH(6);
8146 /* setend */
9886ecdf
PB
8147 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8148 gen_helper_setend(cpu_env);
8149 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8150 }
8151 return;
8152 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8153 switch ((insn >> 4) & 0xf) {
8154 case 1: /* clrex */
8155 ARCH(6K);
426f5abc 8156 gen_clrex(s);
9ee6e8bb
PB
8157 return;
8158 case 4: /* dsb */
8159 case 5: /* dmb */
9ee6e8bb 8160 ARCH(7);
61e4c432 8161 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8162 return;
6df99dec
SS
8163 case 6: /* isb */
8164 /* We need to break the TB after this insn to execute
8165 * self-modifying code correctly and also to take
8166 * any pending interrupts immediately.
8167 */
8168 gen_lookup_tb(s);
8169 return;
9ee6e8bb
PB
8170 default:
8171 goto illegal_op;
8172 }
8173 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8174 /* srs */
81465888
PM
8175 ARCH(6);
8176 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8177 return;
ea825eee 8178 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8179 /* rfe */
c67b6b71 8180 int32_t offset;
9ee6e8bb
PB
8181 if (IS_USER(s))
8182 goto illegal_op;
8183 ARCH(6);
8184 rn = (insn >> 16) & 0xf;
b0109805 8185 addr = load_reg(s, rn);
9ee6e8bb
PB
8186 i = (insn >> 23) & 3;
8187 switch (i) {
b0109805 8188 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8189 case 1: offset = 0; break; /* IA */
8190 case 2: offset = -8; break; /* DB */
b0109805 8191 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8192 default: abort();
8193 }
8194 if (offset)
b0109805
PB
8195 tcg_gen_addi_i32(addr, addr, offset);
8196 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8197 tmp = tcg_temp_new_i32();
12dcc321 8198 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8199 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8200 tmp2 = tcg_temp_new_i32();
12dcc321 8201 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8202 if (insn & (1 << 21)) {
8203 /* Base writeback. */
8204 switch (i) {
b0109805 8205 case 0: offset = -8; break;
c67b6b71
FN
8206 case 1: offset = 4; break;
8207 case 2: offset = -4; break;
b0109805 8208 case 3: offset = 0; break;
9ee6e8bb
PB
8209 default: abort();
8210 }
8211 if (offset)
b0109805
PB
8212 tcg_gen_addi_i32(addr, addr, offset);
8213 store_reg(s, rn, addr);
8214 } else {
7d1b0095 8215 tcg_temp_free_i32(addr);
9ee6e8bb 8216 }
b0109805 8217 gen_rfe(s, tmp, tmp2);
c67b6b71 8218 return;
9ee6e8bb
PB
8219 } else if ((insn & 0x0e000000) == 0x0a000000) {
8220 /* branch link and change to thumb (blx <offset>) */
8221 int32_t offset;
8222
8223 val = (uint32_t)s->pc;
7d1b0095 8224 tmp = tcg_temp_new_i32();
d9ba4830
PB
8225 tcg_gen_movi_i32(tmp, val);
8226 store_reg(s, 14, tmp);
9ee6e8bb
PB
8227 /* Sign-extend the 24-bit offset */
8228 offset = (((int32_t)insn) << 8) >> 8;
8229 /* offset * 4 + bit24 * 2 + (thumb bit) */
8230 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8231 /* pipeline offset */
8232 val += 4;
be5e7a76 8233 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8234 gen_bx_im(s, val);
9ee6e8bb
PB
8235 return;
8236 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8237 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8238 /* iWMMXt register transfer. */
c0f4af17 8239 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8240 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8241 return;
c0f4af17
PM
8242 }
8243 }
9ee6e8bb
PB
8244 }
8245 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8246 /* Coprocessor double register transfer. */
be5e7a76 8247 ARCH(5TE);
9ee6e8bb
PB
8248 } else if ((insn & 0x0f000010) == 0x0e000010) {
8249 /* Additional coprocessor register transfer. */
7997d92f 8250 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8251 uint32_t mask;
8252 uint32_t val;
8253 /* cps (privileged) */
8254 if (IS_USER(s))
8255 return;
8256 mask = val = 0;
8257 if (insn & (1 << 19)) {
8258 if (insn & (1 << 8))
8259 mask |= CPSR_A;
8260 if (insn & (1 << 7))
8261 mask |= CPSR_I;
8262 if (insn & (1 << 6))
8263 mask |= CPSR_F;
8264 if (insn & (1 << 18))
8265 val |= mask;
8266 }
7997d92f 8267 if (insn & (1 << 17)) {
9ee6e8bb
PB
8268 mask |= CPSR_M;
8269 val |= (insn & 0x1f);
8270 }
8271 if (mask) {
2fbac54b 8272 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8273 }
8274 return;
8275 }
8276 goto illegal_op;
8277 }
8278 if (cond != 0xe) {
8279 /* if not always execute, we generate a conditional jump to
8280 next instruction */
8281 s->condlabel = gen_new_label();
39fb730a 8282 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8283 s->condjmp = 1;
8284 }
8285 if ((insn & 0x0f900000) == 0x03000000) {
8286 if ((insn & (1 << 21)) == 0) {
8287 ARCH(6T2);
8288 rd = (insn >> 12) & 0xf;
8289 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8290 if ((insn & (1 << 22)) == 0) {
8291 /* MOVW */
7d1b0095 8292 tmp = tcg_temp_new_i32();
5e3f878a 8293 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8294 } else {
8295 /* MOVT */
5e3f878a 8296 tmp = load_reg(s, rd);
86831435 8297 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8298 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8299 }
5e3f878a 8300 store_reg(s, rd, tmp);
9ee6e8bb
PB
8301 } else {
8302 if (((insn >> 12) & 0xf) != 0xf)
8303 goto illegal_op;
8304 if (((insn >> 16) & 0xf) == 0) {
8305 gen_nop_hint(s, insn & 0xff);
8306 } else {
8307 /* CPSR = immediate */
8308 val = insn & 0xff;
8309 shift = ((insn >> 8) & 0xf) * 2;
8310 if (shift)
8311 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8312 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8313 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8314 i, val)) {
9ee6e8bb 8315 goto illegal_op;
7dcc1f89 8316 }
9ee6e8bb
PB
8317 }
8318 }
8319 } else if ((insn & 0x0f900000) == 0x01000000
8320 && (insn & 0x00000090) != 0x00000090) {
8321 /* miscellaneous instructions */
8322 op1 = (insn >> 21) & 3;
8323 sh = (insn >> 4) & 0xf;
8324 rm = insn & 0xf;
8325 switch (sh) {
8bfd0550
PM
8326 case 0x0: /* MSR, MRS */
8327 if (insn & (1 << 9)) {
8328 /* MSR (banked) and MRS (banked) */
8329 int sysm = extract32(insn, 16, 4) |
8330 (extract32(insn, 8, 1) << 4);
8331 int r = extract32(insn, 22, 1);
8332
8333 if (op1 & 1) {
8334 /* MSR (banked) */
8335 gen_msr_banked(s, r, sysm, rm);
8336 } else {
8337 /* MRS (banked) */
8338 int rd = extract32(insn, 12, 4);
8339
8340 gen_mrs_banked(s, r, sysm, rd);
8341 }
8342 break;
8343 }
8344
8345 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8346 if (op1 & 1) {
8347 /* PSR = reg */
2fbac54b 8348 tmp = load_reg(s, rm);
9ee6e8bb 8349 i = ((op1 & 2) != 0);
7dcc1f89 8350 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8351 goto illegal_op;
8352 } else {
8353 /* reg = PSR */
8354 rd = (insn >> 12) & 0xf;
8355 if (op1 & 2) {
8356 if (IS_USER(s))
8357 goto illegal_op;
d9ba4830 8358 tmp = load_cpu_field(spsr);
9ee6e8bb 8359 } else {
7d1b0095 8360 tmp = tcg_temp_new_i32();
9ef39277 8361 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8362 }
d9ba4830 8363 store_reg(s, rd, tmp);
9ee6e8bb
PB
8364 }
8365 break;
8366 case 0x1:
8367 if (op1 == 1) {
8368 /* branch/exchange thumb (bx). */
be5e7a76 8369 ARCH(4T);
d9ba4830
PB
8370 tmp = load_reg(s, rm);
8371 gen_bx(s, tmp);
9ee6e8bb
PB
8372 } else if (op1 == 3) {
8373 /* clz */
be5e7a76 8374 ARCH(5);
9ee6e8bb 8375 rd = (insn >> 12) & 0xf;
1497c961 8376 tmp = load_reg(s, rm);
7539a012 8377 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8378 store_reg(s, rd, tmp);
9ee6e8bb
PB
8379 } else {
8380 goto illegal_op;
8381 }
8382 break;
8383 case 0x2:
8384 if (op1 == 1) {
8385 ARCH(5J); /* bxj */
8386 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8387 tmp = load_reg(s, rm);
8388 gen_bx(s, tmp);
9ee6e8bb
PB
8389 } else {
8390 goto illegal_op;
8391 }
8392 break;
8393 case 0x3:
8394 if (op1 != 1)
8395 goto illegal_op;
8396
be5e7a76 8397 ARCH(5);
9ee6e8bb 8398 /* branch link/exchange thumb (blx) */
d9ba4830 8399 tmp = load_reg(s, rm);
7d1b0095 8400 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8401 tcg_gen_movi_i32(tmp2, s->pc);
8402 store_reg(s, 14, tmp2);
8403 gen_bx(s, tmp);
9ee6e8bb 8404 break;
eb0ecd5a
WN
8405 case 0x4:
8406 {
8407 /* crc32/crc32c */
8408 uint32_t c = extract32(insn, 8, 4);
8409
8410 /* Check this CPU supports ARMv8 CRC instructions.
8411 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8412 * Bits 8, 10 and 11 should be zero.
8413 */
d614a513 8414 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8415 (c & 0xd) != 0) {
8416 goto illegal_op;
8417 }
8418
8419 rn = extract32(insn, 16, 4);
8420 rd = extract32(insn, 12, 4);
8421
8422 tmp = load_reg(s, rn);
8423 tmp2 = load_reg(s, rm);
aa633469
PM
8424 if (op1 == 0) {
8425 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8426 } else if (op1 == 1) {
8427 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8428 }
eb0ecd5a
WN
8429 tmp3 = tcg_const_i32(1 << op1);
8430 if (c & 0x2) {
8431 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8432 } else {
8433 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8434 }
8435 tcg_temp_free_i32(tmp2);
8436 tcg_temp_free_i32(tmp3);
8437 store_reg(s, rd, tmp);
8438 break;
8439 }
9ee6e8bb 8440 case 0x5: /* saturating add/subtract */
be5e7a76 8441 ARCH(5TE);
9ee6e8bb
PB
8442 rd = (insn >> 12) & 0xf;
8443 rn = (insn >> 16) & 0xf;
b40d0353 8444 tmp = load_reg(s, rm);
5e3f878a 8445 tmp2 = load_reg(s, rn);
9ee6e8bb 8446 if (op1 & 2)
9ef39277 8447 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8448 if (op1 & 1)
9ef39277 8449 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8450 else
9ef39277 8451 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8452 tcg_temp_free_i32(tmp2);
5e3f878a 8453 store_reg(s, rd, tmp);
9ee6e8bb 8454 break;
49e14940 8455 case 7:
d4a2dc67
PM
8456 {
8457 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8458 switch (op1) {
19a6e31c
PM
8459 case 0:
8460 /* HLT */
8461 gen_hlt(s, imm16);
8462 break;
37e6456e
PM
8463 case 1:
8464 /* bkpt */
8465 ARCH(5);
8466 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8467 syn_aa32_bkpt(imm16, false),
8468 default_exception_el(s));
37e6456e
PM
8469 break;
8470 case 2:
8471 /* Hypervisor call (v7) */
8472 ARCH(7);
8473 if (IS_USER(s)) {
8474 goto illegal_op;
8475 }
8476 gen_hvc(s, imm16);
8477 break;
8478 case 3:
8479 /* Secure monitor call (v6+) */
8480 ARCH(6K);
8481 if (IS_USER(s)) {
8482 goto illegal_op;
8483 }
8484 gen_smc(s);
8485 break;
8486 default:
19a6e31c 8487 g_assert_not_reached();
49e14940 8488 }
9ee6e8bb 8489 break;
d4a2dc67 8490 }
9ee6e8bb
PB
8491 case 0x8: /* signed multiply */
8492 case 0xa:
8493 case 0xc:
8494 case 0xe:
be5e7a76 8495 ARCH(5TE);
9ee6e8bb
PB
8496 rs = (insn >> 8) & 0xf;
8497 rn = (insn >> 12) & 0xf;
8498 rd = (insn >> 16) & 0xf;
8499 if (op1 == 1) {
8500 /* (32 * 16) >> 16 */
5e3f878a
PB
8501 tmp = load_reg(s, rm);
8502 tmp2 = load_reg(s, rs);
9ee6e8bb 8503 if (sh & 4)
5e3f878a 8504 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8505 else
5e3f878a 8506 gen_sxth(tmp2);
a7812ae4
PB
8507 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8508 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8509 tmp = tcg_temp_new_i32();
ecc7b3aa 8510 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8511 tcg_temp_free_i64(tmp64);
9ee6e8bb 8512 if ((sh & 2) == 0) {
5e3f878a 8513 tmp2 = load_reg(s, rn);
9ef39277 8514 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8515 tcg_temp_free_i32(tmp2);
9ee6e8bb 8516 }
5e3f878a 8517 store_reg(s, rd, tmp);
9ee6e8bb
PB
8518 } else {
8519 /* 16 * 16 */
5e3f878a
PB
8520 tmp = load_reg(s, rm);
8521 tmp2 = load_reg(s, rs);
8522 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8523 tcg_temp_free_i32(tmp2);
9ee6e8bb 8524 if (op1 == 2) {
a7812ae4
PB
8525 tmp64 = tcg_temp_new_i64();
8526 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8527 tcg_temp_free_i32(tmp);
a7812ae4
PB
8528 gen_addq(s, tmp64, rn, rd);
8529 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8530 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8531 } else {
8532 if (op1 == 0) {
5e3f878a 8533 tmp2 = load_reg(s, rn);
9ef39277 8534 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8535 tcg_temp_free_i32(tmp2);
9ee6e8bb 8536 }
5e3f878a 8537 store_reg(s, rd, tmp);
9ee6e8bb
PB
8538 }
8539 }
8540 break;
8541 default:
8542 goto illegal_op;
8543 }
8544 } else if (((insn & 0x0e000000) == 0 &&
8545 (insn & 0x00000090) != 0x90) ||
8546 ((insn & 0x0e000000) == (1 << 25))) {
8547 int set_cc, logic_cc, shiftop;
8548
8549 op1 = (insn >> 21) & 0xf;
8550 set_cc = (insn >> 20) & 1;
8551 logic_cc = table_logic_cc[op1] & set_cc;
8552
8553 /* data processing instruction */
8554 if (insn & (1 << 25)) {
8555 /* immediate operand */
8556 val = insn & 0xff;
8557 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8558 if (shift) {
9ee6e8bb 8559 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8560 }
7d1b0095 8561 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8562 tcg_gen_movi_i32(tmp2, val);
8563 if (logic_cc && shift) {
8564 gen_set_CF_bit31(tmp2);
8565 }
9ee6e8bb
PB
8566 } else {
8567 /* register */
8568 rm = (insn) & 0xf;
e9bb4aa9 8569 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8570 shiftop = (insn >> 5) & 3;
8571 if (!(insn & (1 << 4))) {
8572 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8573 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8574 } else {
8575 rs = (insn >> 8) & 0xf;
8984bd2e 8576 tmp = load_reg(s, rs);
e9bb4aa9 8577 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8578 }
8579 }
8580 if (op1 != 0x0f && op1 != 0x0d) {
8581 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8582 tmp = load_reg(s, rn);
8583 } else {
39d5492a 8584 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8585 }
8586 rd = (insn >> 12) & 0xf;
8587 switch(op1) {
8588 case 0x00:
e9bb4aa9
JR
8589 tcg_gen_and_i32(tmp, tmp, tmp2);
8590 if (logic_cc) {
8591 gen_logic_CC(tmp);
8592 }
7dcc1f89 8593 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8594 break;
8595 case 0x01:
e9bb4aa9
JR
8596 tcg_gen_xor_i32(tmp, tmp, tmp2);
8597 if (logic_cc) {
8598 gen_logic_CC(tmp);
8599 }
7dcc1f89 8600 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8601 break;
8602 case 0x02:
8603 if (set_cc && rd == 15) {
8604 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8605 if (IS_USER(s)) {
9ee6e8bb 8606 goto illegal_op;
e9bb4aa9 8607 }
72485ec4 8608 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8609 gen_exception_return(s, tmp);
9ee6e8bb 8610 } else {
e9bb4aa9 8611 if (set_cc) {
72485ec4 8612 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8613 } else {
8614 tcg_gen_sub_i32(tmp, tmp, tmp2);
8615 }
7dcc1f89 8616 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8617 }
8618 break;
8619 case 0x03:
e9bb4aa9 8620 if (set_cc) {
72485ec4 8621 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8622 } else {
8623 tcg_gen_sub_i32(tmp, tmp2, tmp);
8624 }
7dcc1f89 8625 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8626 break;
8627 case 0x04:
e9bb4aa9 8628 if (set_cc) {
72485ec4 8629 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8630 } else {
8631 tcg_gen_add_i32(tmp, tmp, tmp2);
8632 }
7dcc1f89 8633 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8634 break;
8635 case 0x05:
e9bb4aa9 8636 if (set_cc) {
49b4c31e 8637 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8638 } else {
8639 gen_add_carry(tmp, tmp, tmp2);
8640 }
7dcc1f89 8641 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8642 break;
8643 case 0x06:
e9bb4aa9 8644 if (set_cc) {
2de68a49 8645 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8646 } else {
8647 gen_sub_carry(tmp, tmp, tmp2);
8648 }
7dcc1f89 8649 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8650 break;
8651 case 0x07:
e9bb4aa9 8652 if (set_cc) {
2de68a49 8653 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8654 } else {
8655 gen_sub_carry(tmp, tmp2, tmp);
8656 }
7dcc1f89 8657 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8658 break;
8659 case 0x08:
8660 if (set_cc) {
e9bb4aa9
JR
8661 tcg_gen_and_i32(tmp, tmp, tmp2);
8662 gen_logic_CC(tmp);
9ee6e8bb 8663 }
7d1b0095 8664 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8665 break;
8666 case 0x09:
8667 if (set_cc) {
e9bb4aa9
JR
8668 tcg_gen_xor_i32(tmp, tmp, tmp2);
8669 gen_logic_CC(tmp);
9ee6e8bb 8670 }
7d1b0095 8671 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8672 break;
8673 case 0x0a:
8674 if (set_cc) {
72485ec4 8675 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8676 }
7d1b0095 8677 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8678 break;
8679 case 0x0b:
8680 if (set_cc) {
72485ec4 8681 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8682 }
7d1b0095 8683 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8684 break;
8685 case 0x0c:
e9bb4aa9
JR
8686 tcg_gen_or_i32(tmp, tmp, tmp2);
8687 if (logic_cc) {
8688 gen_logic_CC(tmp);
8689 }
7dcc1f89 8690 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8691 break;
8692 case 0x0d:
8693 if (logic_cc && rd == 15) {
8694 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8695 if (IS_USER(s)) {
9ee6e8bb 8696 goto illegal_op;
e9bb4aa9
JR
8697 }
8698 gen_exception_return(s, tmp2);
9ee6e8bb 8699 } else {
e9bb4aa9
JR
8700 if (logic_cc) {
8701 gen_logic_CC(tmp2);
8702 }
7dcc1f89 8703 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8704 }
8705 break;
8706 case 0x0e:
f669df27 8707 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8708 if (logic_cc) {
8709 gen_logic_CC(tmp);
8710 }
7dcc1f89 8711 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8712 break;
8713 default:
8714 case 0x0f:
e9bb4aa9
JR
8715 tcg_gen_not_i32(tmp2, tmp2);
8716 if (logic_cc) {
8717 gen_logic_CC(tmp2);
8718 }
7dcc1f89 8719 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8720 break;
8721 }
e9bb4aa9 8722 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8723 tcg_temp_free_i32(tmp2);
e9bb4aa9 8724 }
9ee6e8bb
PB
8725 } else {
8726 /* other instructions */
8727 op1 = (insn >> 24) & 0xf;
8728 switch(op1) {
8729 case 0x0:
8730 case 0x1:
8731 /* multiplies, extra load/stores */
8732 sh = (insn >> 5) & 3;
8733 if (sh == 0) {
8734 if (op1 == 0x0) {
8735 rd = (insn >> 16) & 0xf;
8736 rn = (insn >> 12) & 0xf;
8737 rs = (insn >> 8) & 0xf;
8738 rm = (insn) & 0xf;
8739 op1 = (insn >> 20) & 0xf;
8740 switch (op1) {
8741 case 0: case 1: case 2: case 3: case 6:
8742 /* 32 bit mul */
5e3f878a
PB
8743 tmp = load_reg(s, rs);
8744 tmp2 = load_reg(s, rm);
8745 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8746 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8747 if (insn & (1 << 22)) {
8748 /* Subtract (mls) */
8749 ARCH(6T2);
5e3f878a
PB
8750 tmp2 = load_reg(s, rn);
8751 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8752 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8753 } else if (insn & (1 << 21)) {
8754 /* Add */
5e3f878a
PB
8755 tmp2 = load_reg(s, rn);
8756 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8757 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8758 }
8759 if (insn & (1 << 20))
5e3f878a
PB
8760 gen_logic_CC(tmp);
8761 store_reg(s, rd, tmp);
9ee6e8bb 8762 break;
8aac08b1
AJ
8763 case 4:
8764 /* 64 bit mul double accumulate (UMAAL) */
8765 ARCH(6);
8766 tmp = load_reg(s, rs);
8767 tmp2 = load_reg(s, rm);
8768 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8769 gen_addq_lo(s, tmp64, rn);
8770 gen_addq_lo(s, tmp64, rd);
8771 gen_storeq_reg(s, rn, rd, tmp64);
8772 tcg_temp_free_i64(tmp64);
8773 break;
8774 case 8: case 9: case 10: case 11:
8775 case 12: case 13: case 14: case 15:
8776 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8777 tmp = load_reg(s, rs);
8778 tmp2 = load_reg(s, rm);
8aac08b1 8779 if (insn & (1 << 22)) {
c9f10124 8780 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8781 } else {
c9f10124 8782 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8783 }
8784 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8785 TCGv_i32 al = load_reg(s, rn);
8786 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8787 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8788 tcg_temp_free_i32(al);
8789 tcg_temp_free_i32(ah);
9ee6e8bb 8790 }
8aac08b1 8791 if (insn & (1 << 20)) {
c9f10124 8792 gen_logicq_cc(tmp, tmp2);
8aac08b1 8793 }
c9f10124
RH
8794 store_reg(s, rn, tmp);
8795 store_reg(s, rd, tmp2);
9ee6e8bb 8796 break;
8aac08b1
AJ
8797 default:
8798 goto illegal_op;
9ee6e8bb
PB
8799 }
8800 } else {
8801 rn = (insn >> 16) & 0xf;
8802 rd = (insn >> 12) & 0xf;
8803 if (insn & (1 << 23)) {
8804 /* load/store exclusive */
2359bf80 8805 int op2 = (insn >> 8) & 3;
86753403 8806 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8807
8808 switch (op2) {
8809 case 0: /* lda/stl */
8810 if (op1 == 1) {
8811 goto illegal_op;
8812 }
8813 ARCH(8);
8814 break;
8815 case 1: /* reserved */
8816 goto illegal_op;
8817 case 2: /* ldaex/stlex */
8818 ARCH(8);
8819 break;
8820 case 3: /* ldrex/strex */
8821 if (op1) {
8822 ARCH(6K);
8823 } else {
8824 ARCH(6);
8825 }
8826 break;
8827 }
8828
3174f8e9 8829 addr = tcg_temp_local_new_i32();
98a46317 8830 load_reg_var(s, addr, rn);
2359bf80
MR
8831
8832 /* Since the emulation does not have barriers,
8833 the acquire/release semantics need no special
8834 handling */
8835 if (op2 == 0) {
8836 if (insn & (1 << 20)) {
8837 tmp = tcg_temp_new_i32();
8838 switch (op1) {
8839 case 0: /* lda */
9bb6558a
PM
8840 gen_aa32_ld32u_iss(s, tmp, addr,
8841 get_mem_index(s),
8842 rd | ISSIsAcqRel);
2359bf80
MR
8843 break;
8844 case 2: /* ldab */
9bb6558a
PM
8845 gen_aa32_ld8u_iss(s, tmp, addr,
8846 get_mem_index(s),
8847 rd | ISSIsAcqRel);
2359bf80
MR
8848 break;
8849 case 3: /* ldah */
9bb6558a
PM
8850 gen_aa32_ld16u_iss(s, tmp, addr,
8851 get_mem_index(s),
8852 rd | ISSIsAcqRel);
2359bf80
MR
8853 break;
8854 default:
8855 abort();
8856 }
8857 store_reg(s, rd, tmp);
8858 } else {
8859 rm = insn & 0xf;
8860 tmp = load_reg(s, rm);
8861 switch (op1) {
8862 case 0: /* stl */
9bb6558a
PM
8863 gen_aa32_st32_iss(s, tmp, addr,
8864 get_mem_index(s),
8865 rm | ISSIsAcqRel);
2359bf80
MR
8866 break;
8867 case 2: /* stlb */
9bb6558a
PM
8868 gen_aa32_st8_iss(s, tmp, addr,
8869 get_mem_index(s),
8870 rm | ISSIsAcqRel);
2359bf80
MR
8871 break;
8872 case 3: /* stlh */
9bb6558a
PM
8873 gen_aa32_st16_iss(s, tmp, addr,
8874 get_mem_index(s),
8875 rm | ISSIsAcqRel);
2359bf80
MR
8876 break;
8877 default:
8878 abort();
8879 }
8880 tcg_temp_free_i32(tmp);
8881 }
8882 } else if (insn & (1 << 20)) {
86753403
PB
8883 switch (op1) {
8884 case 0: /* ldrex */
426f5abc 8885 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8886 break;
8887 case 1: /* ldrexd */
426f5abc 8888 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8889 break;
8890 case 2: /* ldrexb */
426f5abc 8891 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8892 break;
8893 case 3: /* ldrexh */
426f5abc 8894 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8895 break;
8896 default:
8897 abort();
8898 }
9ee6e8bb
PB
8899 } else {
8900 rm = insn & 0xf;
86753403
PB
8901 switch (op1) {
8902 case 0: /* strex */
426f5abc 8903 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8904 break;
8905 case 1: /* strexd */
502e64fe 8906 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8907 break;
8908 case 2: /* strexb */
426f5abc 8909 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8910 break;
8911 case 3: /* strexh */
426f5abc 8912 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8913 break;
8914 default:
8915 abort();
8916 }
9ee6e8bb 8917 }
39d5492a 8918 tcg_temp_free_i32(addr);
9ee6e8bb 8919 } else {
cf12bce0
EC
8920 TCGv taddr;
8921 TCGMemOp opc = s->be_data;
8922
9ee6e8bb
PB
8923 /* SWP instruction */
8924 rm = (insn) & 0xf;
8925
9ee6e8bb 8926 if (insn & (1 << 22)) {
cf12bce0 8927 opc |= MO_UB;
9ee6e8bb 8928 } else {
cf12bce0 8929 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8930 }
cf12bce0
EC
8931
8932 addr = load_reg(s, rn);
8933 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8934 tcg_temp_free_i32(addr);
cf12bce0
EC
8935
8936 tmp = load_reg(s, rm);
8937 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8938 get_mem_index(s), opc);
8939 tcg_temp_free(taddr);
8940 store_reg(s, rd, tmp);
9ee6e8bb
PB
8941 }
8942 }
8943 } else {
8944 int address_offset;
3960c336 8945 bool load = insn & (1 << 20);
63f26fcf
PM
8946 bool wbit = insn & (1 << 21);
8947 bool pbit = insn & (1 << 24);
3960c336 8948 bool doubleword = false;
9bb6558a
PM
8949 ISSInfo issinfo;
8950
9ee6e8bb
PB
8951 /* Misc load/store */
8952 rn = (insn >> 16) & 0xf;
8953 rd = (insn >> 12) & 0xf;
3960c336 8954
9bb6558a
PM
8955 /* ISS not valid if writeback */
8956 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8957
3960c336
PM
8958 if (!load && (sh & 2)) {
8959 /* doubleword */
8960 ARCH(5TE);
8961 if (rd & 1) {
8962 /* UNPREDICTABLE; we choose to UNDEF */
8963 goto illegal_op;
8964 }
8965 load = (sh & 1) == 0;
8966 doubleword = true;
8967 }
8968
b0109805 8969 addr = load_reg(s, rn);
63f26fcf 8970 if (pbit) {
b0109805 8971 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8972 }
9ee6e8bb 8973 address_offset = 0;
3960c336
PM
8974
8975 if (doubleword) {
8976 if (!load) {
9ee6e8bb 8977 /* store */
b0109805 8978 tmp = load_reg(s, rd);
12dcc321 8979 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8980 tcg_temp_free_i32(tmp);
b0109805
PB
8981 tcg_gen_addi_i32(addr, addr, 4);
8982 tmp = load_reg(s, rd + 1);
12dcc321 8983 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8984 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8985 } else {
8986 /* load */
5a839c0d 8987 tmp = tcg_temp_new_i32();
12dcc321 8988 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8989 store_reg(s, rd, tmp);
8990 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8991 tmp = tcg_temp_new_i32();
12dcc321 8992 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8993 rd++;
9ee6e8bb
PB
8994 }
8995 address_offset = -4;
3960c336
PM
8996 } else if (load) {
8997 /* load */
8998 tmp = tcg_temp_new_i32();
8999 switch (sh) {
9000 case 1:
9bb6558a
PM
9001 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9002 issinfo);
3960c336
PM
9003 break;
9004 case 2:
9bb6558a
PM
9005 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
9006 issinfo);
3960c336
PM
9007 break;
9008 default:
9009 case 3:
9bb6558a
PM
9010 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
9011 issinfo);
3960c336
PM
9012 break;
9013 }
9ee6e8bb
PB
9014 } else {
9015 /* store */
b0109805 9016 tmp = load_reg(s, rd);
9bb6558a 9017 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 9018 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9019 }
9020 /* Perform base writeback before the loaded value to
9021 ensure correct behavior with overlapping index registers.
b6af0975 9022 ldrd with base writeback is undefined if the
9ee6e8bb 9023 destination and index registers overlap. */
63f26fcf 9024 if (!pbit) {
b0109805
PB
9025 gen_add_datah_offset(s, insn, address_offset, addr);
9026 store_reg(s, rn, addr);
63f26fcf 9027 } else if (wbit) {
9ee6e8bb 9028 if (address_offset)
b0109805
PB
9029 tcg_gen_addi_i32(addr, addr, address_offset);
9030 store_reg(s, rn, addr);
9031 } else {
7d1b0095 9032 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9033 }
9034 if (load) {
9035 /* Complete the load. */
b0109805 9036 store_reg(s, rd, tmp);
9ee6e8bb
PB
9037 }
9038 }
9039 break;
9040 case 0x4:
9041 case 0x5:
9042 goto do_ldst;
9043 case 0x6:
9044 case 0x7:
9045 if (insn & (1 << 4)) {
9046 ARCH(6);
9047 /* Armv6 Media instructions. */
9048 rm = insn & 0xf;
9049 rn = (insn >> 16) & 0xf;
2c0262af 9050 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
9051 rs = (insn >> 8) & 0xf;
9052 switch ((insn >> 23) & 3) {
9053 case 0: /* Parallel add/subtract. */
9054 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
9055 tmp = load_reg(s, rn);
9056 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9057 sh = (insn >> 5) & 7;
9058 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
9059 goto illegal_op;
6ddbc6e4 9060 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 9061 tcg_temp_free_i32(tmp2);
6ddbc6e4 9062 store_reg(s, rd, tmp);
9ee6e8bb
PB
9063 break;
9064 case 1:
9065 if ((insn & 0x00700020) == 0) {
6c95676b 9066 /* Halfword pack. */
3670669c
PB
9067 tmp = load_reg(s, rn);
9068 tmp2 = load_reg(s, rm);
9ee6e8bb 9069 shift = (insn >> 7) & 0x1f;
3670669c
PB
9070 if (insn & (1 << 6)) {
9071 /* pkhtb */
22478e79
AZ
9072 if (shift == 0)
9073 shift = 31;
9074 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 9075 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 9076 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
9077 } else {
9078 /* pkhbt */
22478e79
AZ
9079 if (shift)
9080 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 9081 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
9082 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9083 }
9084 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9085 tcg_temp_free_i32(tmp2);
3670669c 9086 store_reg(s, rd, tmp);
9ee6e8bb
PB
9087 } else if ((insn & 0x00200020) == 0x00200000) {
9088 /* [us]sat */
6ddbc6e4 9089 tmp = load_reg(s, rm);
9ee6e8bb
PB
9090 shift = (insn >> 7) & 0x1f;
9091 if (insn & (1 << 6)) {
9092 if (shift == 0)
9093 shift = 31;
6ddbc6e4 9094 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9095 } else {
6ddbc6e4 9096 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9097 }
9098 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9099 tmp2 = tcg_const_i32(sh);
9100 if (insn & (1 << 22))
9ef39277 9101 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9102 else
9ef39277 9103 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9104 tcg_temp_free_i32(tmp2);
6ddbc6e4 9105 store_reg(s, rd, tmp);
9ee6e8bb
PB
9106 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9107 /* [us]sat16 */
6ddbc6e4 9108 tmp = load_reg(s, rm);
9ee6e8bb 9109 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9110 tmp2 = tcg_const_i32(sh);
9111 if (insn & (1 << 22))
9ef39277 9112 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9113 else
9ef39277 9114 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9115 tcg_temp_free_i32(tmp2);
6ddbc6e4 9116 store_reg(s, rd, tmp);
9ee6e8bb
PB
9117 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9118 /* Select bytes. */
6ddbc6e4
PB
9119 tmp = load_reg(s, rn);
9120 tmp2 = load_reg(s, rm);
7d1b0095 9121 tmp3 = tcg_temp_new_i32();
0ecb72a5 9122 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9123 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9124 tcg_temp_free_i32(tmp3);
9125 tcg_temp_free_i32(tmp2);
6ddbc6e4 9126 store_reg(s, rd, tmp);
9ee6e8bb 9127 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9128 tmp = load_reg(s, rm);
9ee6e8bb 9129 shift = (insn >> 10) & 3;
1301f322 9130 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9131 rotate, a shift is sufficient. */
9132 if (shift != 0)
f669df27 9133 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9134 op1 = (insn >> 20) & 7;
9135 switch (op1) {
5e3f878a
PB
9136 case 0: gen_sxtb16(tmp); break;
9137 case 2: gen_sxtb(tmp); break;
9138 case 3: gen_sxth(tmp); break;
9139 case 4: gen_uxtb16(tmp); break;
9140 case 6: gen_uxtb(tmp); break;
9141 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9142 default: goto illegal_op;
9143 }
9144 if (rn != 15) {
5e3f878a 9145 tmp2 = load_reg(s, rn);
9ee6e8bb 9146 if ((op1 & 3) == 0) {
5e3f878a 9147 gen_add16(tmp, tmp2);
9ee6e8bb 9148 } else {
5e3f878a 9149 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9150 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9151 }
9152 }
6c95676b 9153 store_reg(s, rd, tmp);
9ee6e8bb
PB
9154 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9155 /* rev */
b0109805 9156 tmp = load_reg(s, rm);
9ee6e8bb
PB
9157 if (insn & (1 << 22)) {
9158 if (insn & (1 << 7)) {
b0109805 9159 gen_revsh(tmp);
9ee6e8bb
PB
9160 } else {
9161 ARCH(6T2);
b0109805 9162 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9163 }
9164 } else {
9165 if (insn & (1 << 7))
b0109805 9166 gen_rev16(tmp);
9ee6e8bb 9167 else
66896cb8 9168 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9169 }
b0109805 9170 store_reg(s, rd, tmp);
9ee6e8bb
PB
9171 } else {
9172 goto illegal_op;
9173 }
9174 break;
9175 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9176 switch ((insn >> 20) & 0x7) {
9177 case 5:
9178 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9179 /* op2 not 00x or 11x : UNDEF */
9180 goto illegal_op;
9181 }
838fa72d
AJ
9182 /* Signed multiply most significant [accumulate].
9183 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9184 tmp = load_reg(s, rm);
9185 tmp2 = load_reg(s, rs);
a7812ae4 9186 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9187
955a7dd5 9188 if (rd != 15) {
838fa72d 9189 tmp = load_reg(s, rd);
9ee6e8bb 9190 if (insn & (1 << 6)) {
838fa72d 9191 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9192 } else {
838fa72d 9193 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9194 }
9195 }
838fa72d
AJ
9196 if (insn & (1 << 5)) {
9197 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9198 }
9199 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9200 tmp = tcg_temp_new_i32();
ecc7b3aa 9201 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9202 tcg_temp_free_i64(tmp64);
955a7dd5 9203 store_reg(s, rn, tmp);
41e9564d
PM
9204 break;
9205 case 0:
9206 case 4:
9207 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9208 if (insn & (1 << 7)) {
9209 goto illegal_op;
9210 }
9211 tmp = load_reg(s, rm);
9212 tmp2 = load_reg(s, rs);
9ee6e8bb 9213 if (insn & (1 << 5))
5e3f878a
PB
9214 gen_swap_half(tmp2);
9215 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9216 if (insn & (1 << 22)) {
5e3f878a 9217 /* smlald, smlsld */
33bbd75a
PC
9218 TCGv_i64 tmp64_2;
9219
a7812ae4 9220 tmp64 = tcg_temp_new_i64();
33bbd75a 9221 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9222 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9223 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9224 tcg_temp_free_i32(tmp);
33bbd75a
PC
9225 tcg_temp_free_i32(tmp2);
9226 if (insn & (1 << 6)) {
9227 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9228 } else {
9229 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9230 }
9231 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9232 gen_addq(s, tmp64, rd, rn);
9233 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9234 tcg_temp_free_i64(tmp64);
9ee6e8bb 9235 } else {
5e3f878a 9236 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9237 if (insn & (1 << 6)) {
9238 /* This subtraction cannot overflow. */
9239 tcg_gen_sub_i32(tmp, tmp, tmp2);
9240 } else {
9241 /* This addition cannot overflow 32 bits;
9242 * however it may overflow considered as a
9243 * signed operation, in which case we must set
9244 * the Q flag.
9245 */
9246 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9247 }
9248 tcg_temp_free_i32(tmp2);
22478e79 9249 if (rd != 15)
9ee6e8bb 9250 {
22478e79 9251 tmp2 = load_reg(s, rd);
9ef39277 9252 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9253 tcg_temp_free_i32(tmp2);
9ee6e8bb 9254 }
22478e79 9255 store_reg(s, rn, tmp);
9ee6e8bb 9256 }
41e9564d 9257 break;
b8b8ea05
PM
9258 case 1:
9259 case 3:
9260 /* SDIV, UDIV */
d614a513 9261 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9262 goto illegal_op;
9263 }
9264 if (((insn >> 5) & 7) || (rd != 15)) {
9265 goto illegal_op;
9266 }
9267 tmp = load_reg(s, rm);
9268 tmp2 = load_reg(s, rs);
9269 if (insn & (1 << 21)) {
9270 gen_helper_udiv(tmp, tmp, tmp2);
9271 } else {
9272 gen_helper_sdiv(tmp, tmp, tmp2);
9273 }
9274 tcg_temp_free_i32(tmp2);
9275 store_reg(s, rn, tmp);
9276 break;
41e9564d
PM
9277 default:
9278 goto illegal_op;
9ee6e8bb
PB
9279 }
9280 break;
9281 case 3:
9282 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9283 switch (op1) {
9284 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9285 ARCH(6);
9286 tmp = load_reg(s, rm);
9287 tmp2 = load_reg(s, rs);
9288 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9289 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9290 if (rd != 15) {
9291 tmp2 = load_reg(s, rd);
6ddbc6e4 9292 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9293 tcg_temp_free_i32(tmp2);
9ee6e8bb 9294 }
ded9d295 9295 store_reg(s, rn, tmp);
9ee6e8bb
PB
9296 break;
9297 case 0x20: case 0x24: case 0x28: case 0x2c:
9298 /* Bitfield insert/clear. */
9299 ARCH(6T2);
9300 shift = (insn >> 7) & 0x1f;
9301 i = (insn >> 16) & 0x1f;
45140a57
KB
9302 if (i < shift) {
9303 /* UNPREDICTABLE; we choose to UNDEF */
9304 goto illegal_op;
9305 }
9ee6e8bb
PB
9306 i = i + 1 - shift;
9307 if (rm == 15) {
7d1b0095 9308 tmp = tcg_temp_new_i32();
5e3f878a 9309 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9310 } else {
5e3f878a 9311 tmp = load_reg(s, rm);
9ee6e8bb
PB
9312 }
9313 if (i != 32) {
5e3f878a 9314 tmp2 = load_reg(s, rd);
d593c48e 9315 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9316 tcg_temp_free_i32(tmp2);
9ee6e8bb 9317 }
5e3f878a 9318 store_reg(s, rd, tmp);
9ee6e8bb
PB
9319 break;
9320 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9321 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9322 ARCH(6T2);
5e3f878a 9323 tmp = load_reg(s, rm);
9ee6e8bb
PB
9324 shift = (insn >> 7) & 0x1f;
9325 i = ((insn >> 16) & 0x1f) + 1;
9326 if (shift + i > 32)
9327 goto illegal_op;
9328 if (i < 32) {
9329 if (op1 & 0x20) {
59a71b4c 9330 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9331 } else {
59a71b4c 9332 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9333 }
9334 }
5e3f878a 9335 store_reg(s, rd, tmp);
9ee6e8bb
PB
9336 break;
9337 default:
9338 goto illegal_op;
9339 }
9340 break;
9341 }
9342 break;
9343 }
9344 do_ldst:
9345 /* Check for undefined extension instructions
9346 * per the ARM Bible IE:
9347 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9348 */
9349 sh = (0xf << 20) | (0xf << 4);
9350 if (op1 == 0x7 && ((insn & sh) == sh))
9351 {
9352 goto illegal_op;
9353 }
9354 /* load/store byte/word */
9355 rn = (insn >> 16) & 0xf;
9356 rd = (insn >> 12) & 0xf;
b0109805 9357 tmp2 = load_reg(s, rn);
a99caa48
PM
9358 if ((insn & 0x01200000) == 0x00200000) {
9359 /* ldrt/strt */
579d21cc 9360 i = get_a32_user_mem_index(s);
a99caa48
PM
9361 } else {
9362 i = get_mem_index(s);
9363 }
9ee6e8bb 9364 if (insn & (1 << 24))
b0109805 9365 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9366 if (insn & (1 << 20)) {
9367 /* load */
5a839c0d 9368 tmp = tcg_temp_new_i32();
9ee6e8bb 9369 if (insn & (1 << 22)) {
9bb6558a 9370 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9371 } else {
9bb6558a 9372 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9373 }
9ee6e8bb
PB
9374 } else {
9375 /* store */
b0109805 9376 tmp = load_reg(s, rd);
5a839c0d 9377 if (insn & (1 << 22)) {
9bb6558a 9378 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9379 } else {
9bb6558a 9380 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9381 }
9382 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9383 }
9384 if (!(insn & (1 << 24))) {
b0109805
PB
9385 gen_add_data_offset(s, insn, tmp2);
9386 store_reg(s, rn, tmp2);
9387 } else if (insn & (1 << 21)) {
9388 store_reg(s, rn, tmp2);
9389 } else {
7d1b0095 9390 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9391 }
9392 if (insn & (1 << 20)) {
9393 /* Complete the load. */
7dcc1f89 9394 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9395 }
9396 break;
9397 case 0x08:
9398 case 0x09:
9399 {
da3e53dd
PM
9400 int j, n, loaded_base;
9401 bool exc_return = false;
9402 bool is_load = extract32(insn, 20, 1);
9403 bool user = false;
39d5492a 9404 TCGv_i32 loaded_var;
9ee6e8bb
PB
9405 /* load/store multiple words */
9406 /* XXX: store correct base if write back */
9ee6e8bb 9407 if (insn & (1 << 22)) {
da3e53dd 9408 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9409 if (IS_USER(s))
9410 goto illegal_op; /* only usable in supervisor mode */
9411
da3e53dd
PM
9412 if (is_load && extract32(insn, 15, 1)) {
9413 exc_return = true;
9414 } else {
9415 user = true;
9416 }
9ee6e8bb
PB
9417 }
9418 rn = (insn >> 16) & 0xf;
b0109805 9419 addr = load_reg(s, rn);
9ee6e8bb
PB
9420
9421 /* compute total size */
9422 loaded_base = 0;
39d5492a 9423 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9424 n = 0;
9425 for(i=0;i<16;i++) {
9426 if (insn & (1 << i))
9427 n++;
9428 }
9429 /* XXX: test invalid n == 0 case ? */
9430 if (insn & (1 << 23)) {
9431 if (insn & (1 << 24)) {
9432 /* pre increment */
b0109805 9433 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9434 } else {
9435 /* post increment */
9436 }
9437 } else {
9438 if (insn & (1 << 24)) {
9439 /* pre decrement */
b0109805 9440 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9441 } else {
9442 /* post decrement */
9443 if (n != 1)
b0109805 9444 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9445 }
9446 }
9447 j = 0;
9448 for(i=0;i<16;i++) {
9449 if (insn & (1 << i)) {
da3e53dd 9450 if (is_load) {
9ee6e8bb 9451 /* load */
5a839c0d 9452 tmp = tcg_temp_new_i32();
12dcc321 9453 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9454 if (user) {
b75263d6 9455 tmp2 = tcg_const_i32(i);
1ce94f81 9456 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9457 tcg_temp_free_i32(tmp2);
7d1b0095 9458 tcg_temp_free_i32(tmp);
9ee6e8bb 9459 } else if (i == rn) {
b0109805 9460 loaded_var = tmp;
9ee6e8bb 9461 loaded_base = 1;
fb0e8e79
PM
9462 } else if (rn == 15 && exc_return) {
9463 store_pc_exc_ret(s, tmp);
9ee6e8bb 9464 } else {
7dcc1f89 9465 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9466 }
9467 } else {
9468 /* store */
9469 if (i == 15) {
9470 /* special case: r15 = PC + 8 */
9471 val = (long)s->pc + 4;
7d1b0095 9472 tmp = tcg_temp_new_i32();
b0109805 9473 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9474 } else if (user) {
7d1b0095 9475 tmp = tcg_temp_new_i32();
b75263d6 9476 tmp2 = tcg_const_i32(i);
9ef39277 9477 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9478 tcg_temp_free_i32(tmp2);
9ee6e8bb 9479 } else {
b0109805 9480 tmp = load_reg(s, i);
9ee6e8bb 9481 }
12dcc321 9482 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9483 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9484 }
9485 j++;
9486 /* no need to add after the last transfer */
9487 if (j != n)
b0109805 9488 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9489 }
9490 }
9491 if (insn & (1 << 21)) {
9492 /* write back */
9493 if (insn & (1 << 23)) {
9494 if (insn & (1 << 24)) {
9495 /* pre increment */
9496 } else {
9497 /* post increment */
b0109805 9498 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9499 }
9500 } else {
9501 if (insn & (1 << 24)) {
9502 /* pre decrement */
9503 if (n != 1)
b0109805 9504 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9505 } else {
9506 /* post decrement */
b0109805 9507 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9508 }
9509 }
b0109805
PB
9510 store_reg(s, rn, addr);
9511 } else {
7d1b0095 9512 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9513 }
9514 if (loaded_base) {
b0109805 9515 store_reg(s, rn, loaded_var);
9ee6e8bb 9516 }
da3e53dd 9517 if (exc_return) {
9ee6e8bb 9518 /* Restore CPSR from SPSR. */
d9ba4830 9519 tmp = load_cpu_field(spsr);
235ea1f5 9520 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9521 tcg_temp_free_i32(tmp);
577bf808 9522 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9523 }
9524 }
9525 break;
9526 case 0xa:
9527 case 0xb:
9528 {
9529 int32_t offset;
9530
9531 /* branch (and link) */
9532 val = (int32_t)s->pc;
9533 if (insn & (1 << 24)) {
7d1b0095 9534 tmp = tcg_temp_new_i32();
5e3f878a
PB
9535 tcg_gen_movi_i32(tmp, val);
9536 store_reg(s, 14, tmp);
9ee6e8bb 9537 }
534df156
PM
9538 offset = sextract32(insn << 2, 0, 26);
9539 val += offset + 4;
9ee6e8bb
PB
9540 gen_jmp(s, val);
9541 }
9542 break;
9543 case 0xc:
9544 case 0xd:
9545 case 0xe:
6a57f3eb
WN
9546 if (((insn >> 8) & 0xe) == 10) {
9547 /* VFP. */
7dcc1f89 9548 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9549 goto illegal_op;
9550 }
7dcc1f89 9551 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9552 /* Coprocessor. */
9ee6e8bb 9553 goto illegal_op;
6a57f3eb 9554 }
9ee6e8bb
PB
9555 break;
9556 case 0xf:
9557 /* swi */
eaed129d 9558 gen_set_pc_im(s, s->pc);
d4a2dc67 9559 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9560 s->is_jmp = DISAS_SWI;
9561 break;
9562 default:
9563 illegal_op:
73710361
GB
9564 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9565 default_exception_el(s));
9ee6e8bb
PB
9566 break;
9567 }
9568 }
9569}
9570
/* Return nonzero if OP is one of the Thumb-2 logical data-processing
 * opcodes (0..7: AND, BIC, ORR, ORN, EOR, ...); opcodes 8 and above
 * are the arithmetic operations.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
9577
9578/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9579 then set condition code flags based on the result of the operation.
9580 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9581 to the high bit of T1.
9582 Returns zero if the opcode is valid. */
9583
9584static int
39d5492a
PM
9585gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9586 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9587{
9588 int logic_cc;
9589
9590 logic_cc = 0;
9591 switch (op) {
9592 case 0: /* and */
396e467c 9593 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9594 logic_cc = conds;
9595 break;
9596 case 1: /* bic */
f669df27 9597 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9598 logic_cc = conds;
9599 break;
9600 case 2: /* orr */
396e467c 9601 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9602 logic_cc = conds;
9603 break;
9604 case 3: /* orn */
29501f1b 9605 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9606 logic_cc = conds;
9607 break;
9608 case 4: /* eor */
396e467c 9609 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9610 logic_cc = conds;
9611 break;
9612 case 8: /* add */
9613 if (conds)
72485ec4 9614 gen_add_CC(t0, t0, t1);
9ee6e8bb 9615 else
396e467c 9616 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9617 break;
9618 case 10: /* adc */
9619 if (conds)
49b4c31e 9620 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9621 else
396e467c 9622 gen_adc(t0, t1);
9ee6e8bb
PB
9623 break;
9624 case 11: /* sbc */
2de68a49
RH
9625 if (conds) {
9626 gen_sbc_CC(t0, t0, t1);
9627 } else {
396e467c 9628 gen_sub_carry(t0, t0, t1);
2de68a49 9629 }
9ee6e8bb
PB
9630 break;
9631 case 13: /* sub */
9632 if (conds)
72485ec4 9633 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9634 else
396e467c 9635 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9636 break;
9637 case 14: /* rsb */
9638 if (conds)
72485ec4 9639 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9640 else
396e467c 9641 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9642 break;
9643 default: /* 5, 6, 7, 9, 12, 15. */
9644 return 1;
9645 }
9646 if (logic_cc) {
396e467c 9647 gen_logic_CC(t0);
9ee6e8bb 9648 if (shifter_out)
396e467c 9649 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9650 }
9651 return 0;
9652}
9653
9654/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9655 is not legal. */
0ecb72a5 9656static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9657{
b0109805 9658 uint32_t insn, imm, shift, offset;
9ee6e8bb 9659 uint32_t rd, rn, rm, rs;
39d5492a
PM
9660 TCGv_i32 tmp;
9661 TCGv_i32 tmp2;
9662 TCGv_i32 tmp3;
9663 TCGv_i32 addr;
a7812ae4 9664 TCGv_i64 tmp64;
9ee6e8bb
PB
9665 int op;
9666 int shiftop;
9667 int conds;
9668 int logic_cc;
9669
d614a513
PM
9670 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9671 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9672 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9673 16-bit instructions to get correct prefetch abort behavior. */
9674 insn = insn_hw1;
9675 if ((insn & (1 << 12)) == 0) {
be5e7a76 9676 ARCH(5);
9ee6e8bb
PB
9677 /* Second half of blx. */
9678 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9679 tmp = load_reg(s, 14);
9680 tcg_gen_addi_i32(tmp, tmp, offset);
9681 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9682
7d1b0095 9683 tmp2 = tcg_temp_new_i32();
b0109805 9684 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9685 store_reg(s, 14, tmp2);
9686 gen_bx(s, tmp);
9ee6e8bb
PB
9687 return 0;
9688 }
9689 if (insn & (1 << 11)) {
9690 /* Second half of bl. */
9691 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9692 tmp = load_reg(s, 14);
6a0d8a1d 9693 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9694
7d1b0095 9695 tmp2 = tcg_temp_new_i32();
b0109805 9696 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9697 store_reg(s, 14, tmp2);
9698 gen_bx(s, tmp);
9ee6e8bb
PB
9699 return 0;
9700 }
9701 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9702 /* Instruction spans a page boundary. Implement it as two
9703 16-bit instructions in case the second half causes an
9704 prefetch abort. */
9705 offset = ((int32_t)insn << 21) >> 9;
396e467c 9706 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9707 return 0;
9708 }
9709 /* Fall through to 32-bit decode. */
9710 }
9711
f9fd40eb 9712 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9713 s->pc += 2;
9714 insn |= (uint32_t)insn_hw1 << 16;
9715
9716 if ((insn & 0xf800e800) != 0xf000e800) {
9717 ARCH(6T2);
9718 }
9719
9720 rn = (insn >> 16) & 0xf;
9721 rs = (insn >> 12) & 0xf;
9722 rd = (insn >> 8) & 0xf;
9723 rm = insn & 0xf;
9724 switch ((insn >> 25) & 0xf) {
9725 case 0: case 1: case 2: case 3:
9726 /* 16-bit instructions. Should never happen. */
9727 abort();
9728 case 4:
9729 if (insn & (1 << 22)) {
9730 /* Other load/store, table branch. */
9731 if (insn & 0x01200000) {
9732 /* Load/store doubleword. */
9733 if (rn == 15) {
7d1b0095 9734 addr = tcg_temp_new_i32();
b0109805 9735 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9736 } else {
b0109805 9737 addr = load_reg(s, rn);
9ee6e8bb
PB
9738 }
9739 offset = (insn & 0xff) * 4;
9740 if ((insn & (1 << 23)) == 0)
9741 offset = -offset;
9742 if (insn & (1 << 24)) {
b0109805 9743 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9744 offset = 0;
9745 }
9746 if (insn & (1 << 20)) {
9747 /* ldrd */
e2592fad 9748 tmp = tcg_temp_new_i32();
12dcc321 9749 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9750 store_reg(s, rs, tmp);
9751 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9752 tmp = tcg_temp_new_i32();
12dcc321 9753 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9754 store_reg(s, rd, tmp);
9ee6e8bb
PB
9755 } else {
9756 /* strd */
b0109805 9757 tmp = load_reg(s, rs);
12dcc321 9758 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9759 tcg_temp_free_i32(tmp);
b0109805
PB
9760 tcg_gen_addi_i32(addr, addr, 4);
9761 tmp = load_reg(s, rd);
12dcc321 9762 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9763 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9764 }
9765 if (insn & (1 << 21)) {
9766 /* Base writeback. */
9767 if (rn == 15)
9768 goto illegal_op;
b0109805
PB
9769 tcg_gen_addi_i32(addr, addr, offset - 4);
9770 store_reg(s, rn, addr);
9771 } else {
7d1b0095 9772 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9773 }
9774 } else if ((insn & (1 << 23)) == 0) {
9775 /* Load/store exclusive word. */
39d5492a 9776 addr = tcg_temp_local_new_i32();
98a46317 9777 load_reg_var(s, addr, rn);
426f5abc 9778 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9779 if (insn & (1 << 20)) {
426f5abc 9780 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9781 } else {
426f5abc 9782 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9783 }
39d5492a 9784 tcg_temp_free_i32(addr);
2359bf80 9785 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9786 /* Table Branch. */
9787 if (rn == 15) {
7d1b0095 9788 addr = tcg_temp_new_i32();
b0109805 9789 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9790 } else {
b0109805 9791 addr = load_reg(s, rn);
9ee6e8bb 9792 }
b26eefb6 9793 tmp = load_reg(s, rm);
b0109805 9794 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9795 if (insn & (1 << 4)) {
9796 /* tbh */
b0109805 9797 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9798 tcg_temp_free_i32(tmp);
e2592fad 9799 tmp = tcg_temp_new_i32();
12dcc321 9800 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9801 } else { /* tbb */
7d1b0095 9802 tcg_temp_free_i32(tmp);
e2592fad 9803 tmp = tcg_temp_new_i32();
12dcc321 9804 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9805 }
7d1b0095 9806 tcg_temp_free_i32(addr);
b0109805
PB
9807 tcg_gen_shli_i32(tmp, tmp, 1);
9808 tcg_gen_addi_i32(tmp, tmp, s->pc);
9809 store_reg(s, 15, tmp);
9ee6e8bb 9810 } else {
2359bf80 9811 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9812 op = (insn >> 4) & 0x3;
2359bf80
MR
9813 switch (op2) {
9814 case 0:
426f5abc 9815 goto illegal_op;
2359bf80
MR
9816 case 1:
9817 /* Load/store exclusive byte/halfword/doubleword */
9818 if (op == 2) {
9819 goto illegal_op;
9820 }
9821 ARCH(7);
9822 break;
9823 case 2:
9824 /* Load-acquire/store-release */
9825 if (op == 3) {
9826 goto illegal_op;
9827 }
9828 /* Fall through */
9829 case 3:
9830 /* Load-acquire/store-release exclusive */
9831 ARCH(8);
9832 break;
426f5abc 9833 }
39d5492a 9834 addr = tcg_temp_local_new_i32();
98a46317 9835 load_reg_var(s, addr, rn);
2359bf80
MR
9836 if (!(op2 & 1)) {
9837 if (insn & (1 << 20)) {
9838 tmp = tcg_temp_new_i32();
9839 switch (op) {
9840 case 0: /* ldab */
9bb6558a
PM
9841 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9842 rs | ISSIsAcqRel);
2359bf80
MR
9843 break;
9844 case 1: /* ldah */
9bb6558a
PM
9845 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9846 rs | ISSIsAcqRel);
2359bf80
MR
9847 break;
9848 case 2: /* lda */
9bb6558a
PM
9849 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9850 rs | ISSIsAcqRel);
2359bf80
MR
9851 break;
9852 default:
9853 abort();
9854 }
9855 store_reg(s, rs, tmp);
9856 } else {
9857 tmp = load_reg(s, rs);
9858 switch (op) {
9859 case 0: /* stlb */
9bb6558a
PM
9860 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9861 rs | ISSIsAcqRel);
2359bf80
MR
9862 break;
9863 case 1: /* stlh */
9bb6558a
PM
9864 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9865 rs | ISSIsAcqRel);
2359bf80
MR
9866 break;
9867 case 2: /* stl */
9bb6558a
PM
9868 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9869 rs | ISSIsAcqRel);
2359bf80
MR
9870 break;
9871 default:
9872 abort();
9873 }
9874 tcg_temp_free_i32(tmp);
9875 }
9876 } else if (insn & (1 << 20)) {
426f5abc 9877 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9878 } else {
426f5abc 9879 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9880 }
39d5492a 9881 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9882 }
9883 } else {
9884 /* Load/store multiple, RFE, SRS. */
9885 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9886 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9887 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9888 goto illegal_op;
00115976 9889 }
9ee6e8bb
PB
9890 if (insn & (1 << 20)) {
9891 /* rfe */
b0109805
PB
9892 addr = load_reg(s, rn);
9893 if ((insn & (1 << 24)) == 0)
9894 tcg_gen_addi_i32(addr, addr, -8);
9895 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9896 tmp = tcg_temp_new_i32();
12dcc321 9897 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9898 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9899 tmp2 = tcg_temp_new_i32();
12dcc321 9900 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9901 if (insn & (1 << 21)) {
9902 /* Base writeback. */
b0109805
PB
9903 if (insn & (1 << 24)) {
9904 tcg_gen_addi_i32(addr, addr, 4);
9905 } else {
9906 tcg_gen_addi_i32(addr, addr, -4);
9907 }
9908 store_reg(s, rn, addr);
9909 } else {
7d1b0095 9910 tcg_temp_free_i32(addr);
9ee6e8bb 9911 }
b0109805 9912 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9913 } else {
9914 /* srs */
81465888
PM
9915 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9916 insn & (1 << 21));
9ee6e8bb
PB
9917 }
9918 } else {
5856d44e 9919 int i, loaded_base = 0;
39d5492a 9920 TCGv_i32 loaded_var;
9ee6e8bb 9921 /* Load/store multiple. */
b0109805 9922 addr = load_reg(s, rn);
9ee6e8bb
PB
9923 offset = 0;
9924 for (i = 0; i < 16; i++) {
9925 if (insn & (1 << i))
9926 offset += 4;
9927 }
9928 if (insn & (1 << 24)) {
b0109805 9929 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9930 }
9931
39d5492a 9932 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9933 for (i = 0; i < 16; i++) {
9934 if ((insn & (1 << i)) == 0)
9935 continue;
9936 if (insn & (1 << 20)) {
9937 /* Load. */
e2592fad 9938 tmp = tcg_temp_new_i32();
12dcc321 9939 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9940 if (i == 15) {
3bb8a96f 9941 gen_bx_excret(s, tmp);
5856d44e
YO
9942 } else if (i == rn) {
9943 loaded_var = tmp;
9944 loaded_base = 1;
9ee6e8bb 9945 } else {
b0109805 9946 store_reg(s, i, tmp);
9ee6e8bb
PB
9947 }
9948 } else {
9949 /* Store. */
b0109805 9950 tmp = load_reg(s, i);
12dcc321 9951 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9952 tcg_temp_free_i32(tmp);
9ee6e8bb 9953 }
b0109805 9954 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9955 }
5856d44e
YO
9956 if (loaded_base) {
9957 store_reg(s, rn, loaded_var);
9958 }
9ee6e8bb
PB
9959 if (insn & (1 << 21)) {
9960 /* Base register writeback. */
9961 if (insn & (1 << 24)) {
b0109805 9962 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9963 }
9964 /* Fault if writeback register is in register list. */
9965 if (insn & (1 << rn))
9966 goto illegal_op;
b0109805
PB
9967 store_reg(s, rn, addr);
9968 } else {
7d1b0095 9969 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9970 }
9971 }
9972 }
9973 break;
2af9ab77
JB
9974 case 5:
9975
9ee6e8bb 9976 op = (insn >> 21) & 0xf;
2af9ab77 9977 if (op == 6) {
62b44f05
AR
9978 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9979 goto illegal_op;
9980 }
2af9ab77
JB
9981 /* Halfword pack. */
9982 tmp = load_reg(s, rn);
9983 tmp2 = load_reg(s, rm);
9984 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9985 if (insn & (1 << 5)) {
9986 /* pkhtb */
9987 if (shift == 0)
9988 shift = 31;
9989 tcg_gen_sari_i32(tmp2, tmp2, shift);
9990 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9991 tcg_gen_ext16u_i32(tmp2, tmp2);
9992 } else {
9993 /* pkhbt */
9994 if (shift)
9995 tcg_gen_shli_i32(tmp2, tmp2, shift);
9996 tcg_gen_ext16u_i32(tmp, tmp);
9997 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9998 }
9999 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 10000 tcg_temp_free_i32(tmp2);
3174f8e9
FN
10001 store_reg(s, rd, tmp);
10002 } else {
2af9ab77
JB
10003 /* Data processing register constant shift. */
10004 if (rn == 15) {
7d1b0095 10005 tmp = tcg_temp_new_i32();
2af9ab77
JB
10006 tcg_gen_movi_i32(tmp, 0);
10007 } else {
10008 tmp = load_reg(s, rn);
10009 }
10010 tmp2 = load_reg(s, rm);
10011
10012 shiftop = (insn >> 4) & 3;
10013 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
10014 conds = (insn & (1 << 20)) != 0;
10015 logic_cc = (conds && thumb2_logic_op(op));
10016 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
10017 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
10018 goto illegal_op;
7d1b0095 10019 tcg_temp_free_i32(tmp2);
2af9ab77
JB
10020 if (rd != 15) {
10021 store_reg(s, rd, tmp);
10022 } else {
7d1b0095 10023 tcg_temp_free_i32(tmp);
2af9ab77 10024 }
3174f8e9 10025 }
9ee6e8bb
PB
10026 break;
10027 case 13: /* Misc data processing. */
10028 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
10029 if (op < 4 && (insn & 0xf000) != 0xf000)
10030 goto illegal_op;
10031 switch (op) {
10032 case 0: /* Register controlled shift. */
8984bd2e
PB
10033 tmp = load_reg(s, rn);
10034 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10035 if ((insn & 0x70) != 0)
10036 goto illegal_op;
10037 op = (insn >> 21) & 3;
8984bd2e
PB
10038 logic_cc = (insn & (1 << 20)) != 0;
10039 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
10040 if (logic_cc)
10041 gen_logic_CC(tmp);
bedb8a6b 10042 store_reg(s, rd, tmp);
9ee6e8bb
PB
10043 break;
10044 case 1: /* Sign/zero extend. */
62b44f05
AR
10045 op = (insn >> 20) & 7;
10046 switch (op) {
10047 case 0: /* SXTAH, SXTH */
10048 case 1: /* UXTAH, UXTH */
10049 case 4: /* SXTAB, SXTB */
10050 case 5: /* UXTAB, UXTB */
10051 break;
10052 case 2: /* SXTAB16, SXTB16 */
10053 case 3: /* UXTAB16, UXTB16 */
10054 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10055 goto illegal_op;
10056 }
10057 break;
10058 default:
10059 goto illegal_op;
10060 }
10061 if (rn != 15) {
10062 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10063 goto illegal_op;
10064 }
10065 }
5e3f878a 10066 tmp = load_reg(s, rm);
9ee6e8bb 10067 shift = (insn >> 4) & 3;
1301f322 10068 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
10069 rotate, a shift is sufficient. */
10070 if (shift != 0)
f669df27 10071 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
10072 op = (insn >> 20) & 7;
10073 switch (op) {
5e3f878a
PB
10074 case 0: gen_sxth(tmp); break;
10075 case 1: gen_uxth(tmp); break;
10076 case 2: gen_sxtb16(tmp); break;
10077 case 3: gen_uxtb16(tmp); break;
10078 case 4: gen_sxtb(tmp); break;
10079 case 5: gen_uxtb(tmp); break;
62b44f05
AR
10080 default:
10081 g_assert_not_reached();
9ee6e8bb
PB
10082 }
10083 if (rn != 15) {
5e3f878a 10084 tmp2 = load_reg(s, rn);
9ee6e8bb 10085 if ((op >> 1) == 1) {
5e3f878a 10086 gen_add16(tmp, tmp2);
9ee6e8bb 10087 } else {
5e3f878a 10088 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10089 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10090 }
10091 }
5e3f878a 10092 store_reg(s, rd, tmp);
9ee6e8bb
PB
10093 break;
10094 case 2: /* SIMD add/subtract. */
62b44f05
AR
10095 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10096 goto illegal_op;
10097 }
9ee6e8bb
PB
10098 op = (insn >> 20) & 7;
10099 shift = (insn >> 4) & 7;
10100 if ((op & 3) == 3 || (shift & 3) == 3)
10101 goto illegal_op;
6ddbc6e4
PB
10102 tmp = load_reg(s, rn);
10103 tmp2 = load_reg(s, rm);
10104 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10105 tcg_temp_free_i32(tmp2);
6ddbc6e4 10106 store_reg(s, rd, tmp);
9ee6e8bb
PB
10107 break;
10108 case 3: /* Other data processing. */
10109 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10110 if (op < 4) {
10111 /* Saturating add/subtract. */
62b44f05
AR
10112 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10113 goto illegal_op;
10114 }
d9ba4830
PB
10115 tmp = load_reg(s, rn);
10116 tmp2 = load_reg(s, rm);
9ee6e8bb 10117 if (op & 1)
9ef39277 10118 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10119 if (op & 2)
9ef39277 10120 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10121 else
9ef39277 10122 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10123 tcg_temp_free_i32(tmp2);
9ee6e8bb 10124 } else {
62b44f05
AR
10125 switch (op) {
10126 case 0x0a: /* rbit */
10127 case 0x08: /* rev */
10128 case 0x09: /* rev16 */
10129 case 0x0b: /* revsh */
10130 case 0x18: /* clz */
10131 break;
10132 case 0x10: /* sel */
10133 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10134 goto illegal_op;
10135 }
10136 break;
10137 case 0x20: /* crc32/crc32c */
10138 case 0x21:
10139 case 0x22:
10140 case 0x28:
10141 case 0x29:
10142 case 0x2a:
10143 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10144 goto illegal_op;
10145 }
10146 break;
10147 default:
10148 goto illegal_op;
10149 }
d9ba4830 10150 tmp = load_reg(s, rn);
9ee6e8bb
PB
10151 switch (op) {
10152 case 0x0a: /* rbit */
d9ba4830 10153 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10154 break;
10155 case 0x08: /* rev */
66896cb8 10156 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10157 break;
10158 case 0x09: /* rev16 */
d9ba4830 10159 gen_rev16(tmp);
9ee6e8bb
PB
10160 break;
10161 case 0x0b: /* revsh */
d9ba4830 10162 gen_revsh(tmp);
9ee6e8bb
PB
10163 break;
10164 case 0x10: /* sel */
d9ba4830 10165 tmp2 = load_reg(s, rm);
7d1b0095 10166 tmp3 = tcg_temp_new_i32();
0ecb72a5 10167 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10168 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10169 tcg_temp_free_i32(tmp3);
10170 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10171 break;
10172 case 0x18: /* clz */
7539a012 10173 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10174 break;
eb0ecd5a
WN
10175 case 0x20:
10176 case 0x21:
10177 case 0x22:
10178 case 0x28:
10179 case 0x29:
10180 case 0x2a:
10181 {
10182 /* crc32/crc32c */
10183 uint32_t sz = op & 0x3;
10184 uint32_t c = op & 0x8;
10185
eb0ecd5a 10186 tmp2 = load_reg(s, rm);
aa633469
PM
10187 if (sz == 0) {
10188 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10189 } else if (sz == 1) {
10190 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10191 }
eb0ecd5a
WN
10192 tmp3 = tcg_const_i32(1 << sz);
10193 if (c) {
10194 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10195 } else {
10196 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10197 }
10198 tcg_temp_free_i32(tmp2);
10199 tcg_temp_free_i32(tmp3);
10200 break;
10201 }
9ee6e8bb 10202 default:
62b44f05 10203 g_assert_not_reached();
9ee6e8bb
PB
10204 }
10205 }
d9ba4830 10206 store_reg(s, rd, tmp);
9ee6e8bb
PB
10207 break;
10208 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10209 switch ((insn >> 20) & 7) {
10210 case 0: /* 32 x 32 -> 32 */
10211 case 7: /* Unsigned sum of absolute differences. */
10212 break;
10213 case 1: /* 16 x 16 -> 32 */
10214 case 2: /* Dual multiply add. */
10215 case 3: /* 32 * 16 -> 32msb */
10216 case 4: /* Dual multiply subtract. */
10217 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10218 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10219 goto illegal_op;
10220 }
10221 break;
10222 }
9ee6e8bb 10223 op = (insn >> 4) & 0xf;
d9ba4830
PB
10224 tmp = load_reg(s, rn);
10225 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10226 switch ((insn >> 20) & 7) {
10227 case 0: /* 32 x 32 -> 32 */
d9ba4830 10228 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10229 tcg_temp_free_i32(tmp2);
9ee6e8bb 10230 if (rs != 15) {
d9ba4830 10231 tmp2 = load_reg(s, rs);
9ee6e8bb 10232 if (op)
d9ba4830 10233 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10234 else
d9ba4830 10235 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10236 tcg_temp_free_i32(tmp2);
9ee6e8bb 10237 }
9ee6e8bb
PB
10238 break;
10239 case 1: /* 16 x 16 -> 32 */
d9ba4830 10240 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10241 tcg_temp_free_i32(tmp2);
9ee6e8bb 10242 if (rs != 15) {
d9ba4830 10243 tmp2 = load_reg(s, rs);
9ef39277 10244 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10245 tcg_temp_free_i32(tmp2);
9ee6e8bb 10246 }
9ee6e8bb
PB
10247 break;
10248 case 2: /* Dual multiply add. */
10249 case 4: /* Dual multiply subtract. */
10250 if (op)
d9ba4830
PB
10251 gen_swap_half(tmp2);
10252 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10253 if (insn & (1 << 22)) {
e1d177b9 10254 /* This subtraction cannot overflow. */
d9ba4830 10255 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10256 } else {
e1d177b9
PM
10257 /* This addition cannot overflow 32 bits;
10258 * however it may overflow considered as a signed
10259 * operation, in which case we must set the Q flag.
10260 */
9ef39277 10261 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10262 }
7d1b0095 10263 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10264 if (rs != 15)
10265 {
d9ba4830 10266 tmp2 = load_reg(s, rs);
9ef39277 10267 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10268 tcg_temp_free_i32(tmp2);
9ee6e8bb 10269 }
9ee6e8bb
PB
10270 break;
10271 case 3: /* 32 * 16 -> 32msb */
10272 if (op)
d9ba4830 10273 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10274 else
d9ba4830 10275 gen_sxth(tmp2);
a7812ae4
PB
10276 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10277 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10278 tmp = tcg_temp_new_i32();
ecc7b3aa 10279 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10280 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10281 if (rs != 15)
10282 {
d9ba4830 10283 tmp2 = load_reg(s, rs);
9ef39277 10284 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10285 tcg_temp_free_i32(tmp2);
9ee6e8bb 10286 }
9ee6e8bb 10287 break;
838fa72d
AJ
10288 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10289 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10290 if (rs != 15) {
838fa72d
AJ
10291 tmp = load_reg(s, rs);
10292 if (insn & (1 << 20)) {
10293 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10294 } else {
838fa72d 10295 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10296 }
2c0262af 10297 }
838fa72d
AJ
10298 if (insn & (1 << 4)) {
10299 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10300 }
10301 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10302 tmp = tcg_temp_new_i32();
ecc7b3aa 10303 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10304 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10305 break;
10306 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10307 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10308 tcg_temp_free_i32(tmp2);
9ee6e8bb 10309 if (rs != 15) {
d9ba4830
PB
10310 tmp2 = load_reg(s, rs);
10311 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10312 tcg_temp_free_i32(tmp2);
5fd46862 10313 }
9ee6e8bb 10314 break;
2c0262af 10315 }
d9ba4830 10316 store_reg(s, rd, tmp);
2c0262af 10317 break;
9ee6e8bb
PB
10318 case 6: case 7: /* 64-bit multiply, Divide. */
10319 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10320 tmp = load_reg(s, rn);
10321 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10322 if ((op & 0x50) == 0x10) {
10323 /* sdiv, udiv */
d614a513 10324 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10325 goto illegal_op;
47789990 10326 }
9ee6e8bb 10327 if (op & 0x20)
5e3f878a 10328 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10329 else
5e3f878a 10330 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10331 tcg_temp_free_i32(tmp2);
5e3f878a 10332 store_reg(s, rd, tmp);
9ee6e8bb
PB
10333 } else if ((op & 0xe) == 0xc) {
10334 /* Dual multiply accumulate long. */
62b44f05
AR
10335 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10336 tcg_temp_free_i32(tmp);
10337 tcg_temp_free_i32(tmp2);
10338 goto illegal_op;
10339 }
9ee6e8bb 10340 if (op & 1)
5e3f878a
PB
10341 gen_swap_half(tmp2);
10342 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10343 if (op & 0x10) {
5e3f878a 10344 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10345 } else {
5e3f878a 10346 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10347 }
7d1b0095 10348 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10349 /* BUGFIX */
10350 tmp64 = tcg_temp_new_i64();
10351 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10352 tcg_temp_free_i32(tmp);
a7812ae4
PB
10353 gen_addq(s, tmp64, rs, rd);
10354 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10355 tcg_temp_free_i64(tmp64);
2c0262af 10356 } else {
9ee6e8bb
PB
10357 if (op & 0x20) {
10358 /* Unsigned 64-bit multiply */
a7812ae4 10359 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10360 } else {
9ee6e8bb
PB
10361 if (op & 8) {
10362 /* smlalxy */
62b44f05
AR
10363 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10364 tcg_temp_free_i32(tmp2);
10365 tcg_temp_free_i32(tmp);
10366 goto illegal_op;
10367 }
5e3f878a 10368 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10369 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10370 tmp64 = tcg_temp_new_i64();
10371 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10372 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10373 } else {
10374 /* Signed 64-bit multiply */
a7812ae4 10375 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10376 }
b5ff1b31 10377 }
9ee6e8bb
PB
10378 if (op & 4) {
10379 /* umaal */
62b44f05
AR
10380 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10381 tcg_temp_free_i64(tmp64);
10382 goto illegal_op;
10383 }
a7812ae4
PB
10384 gen_addq_lo(s, tmp64, rs);
10385 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10386 } else if (op & 0x40) {
10387 /* 64-bit accumulate. */
a7812ae4 10388 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10389 }
a7812ae4 10390 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10391 tcg_temp_free_i64(tmp64);
5fd46862 10392 }
2c0262af 10393 break;
9ee6e8bb
PB
10394 }
10395 break;
10396 case 6: case 7: case 14: case 15:
10397 /* Coprocessor. */
7517748e
PM
10398 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10399 /* We don't currently implement M profile FP support,
10400 * so this entire space should give a NOCP fault.
10401 */
10402 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10403 default_exception_el(s));
10404 break;
10405 }
9ee6e8bb
PB
10406 if (((insn >> 24) & 3) == 3) {
10407 /* Translate into the equivalent ARM encoding. */
f06053e3 10408 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10409 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10410 goto illegal_op;
7dcc1f89 10411 }
6a57f3eb 10412 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10413 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10414 goto illegal_op;
10415 }
9ee6e8bb
PB
10416 } else {
10417 if (insn & (1 << 28))
10418 goto illegal_op;
7dcc1f89 10419 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10420 goto illegal_op;
7dcc1f89 10421 }
9ee6e8bb
PB
10422 }
10423 break;
10424 case 8: case 9: case 10: case 11:
10425 if (insn & (1 << 15)) {
10426 /* Branches, misc control. */
10427 if (insn & 0x5000) {
10428 /* Unconditional branch. */
10429 /* signextend(hw1[10:0]) -> offset[:12]. */
10430 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10431 /* hw1[10:0] -> offset[11:1]. */
10432 offset |= (insn & 0x7ff) << 1;
10433 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10434 offset[24:22] already have the same value because of the
10435 sign extension above. */
10436 offset ^= ((~insn) & (1 << 13)) << 10;
10437 offset ^= ((~insn) & (1 << 11)) << 11;
10438
9ee6e8bb
PB
10439 if (insn & (1 << 14)) {
10440 /* Branch and link. */
3174f8e9 10441 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10442 }
3b46e624 10443
b0109805 10444 offset += s->pc;
9ee6e8bb
PB
10445 if (insn & (1 << 12)) {
10446 /* b/bl */
b0109805 10447 gen_jmp(s, offset);
9ee6e8bb
PB
10448 } else {
10449 /* blx */
b0109805 10450 offset &= ~(uint32_t)2;
be5e7a76 10451 /* thumb2 bx, no need to check */
b0109805 10452 gen_bx_im(s, offset);
2c0262af 10453 }
9ee6e8bb
PB
10454 } else if (((insn >> 23) & 7) == 7) {
10455 /* Misc control */
10456 if (insn & (1 << 13))
10457 goto illegal_op;
10458
10459 if (insn & (1 << 26)) {
001b3cab
PM
10460 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10461 goto illegal_op;
10462 }
37e6456e
PM
10463 if (!(insn & (1 << 20))) {
10464 /* Hypervisor call (v7) */
10465 int imm16 = extract32(insn, 16, 4) << 12
10466 | extract32(insn, 0, 12);
10467 ARCH(7);
10468 if (IS_USER(s)) {
10469 goto illegal_op;
10470 }
10471 gen_hvc(s, imm16);
10472 } else {
10473 /* Secure monitor call (v6+) */
10474 ARCH(6K);
10475 if (IS_USER(s)) {
10476 goto illegal_op;
10477 }
10478 gen_smc(s);
10479 }
2c0262af 10480 } else {
9ee6e8bb
PB
10481 op = (insn >> 20) & 7;
10482 switch (op) {
10483 case 0: /* msr cpsr. */
b53d8923 10484 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e 10485 tmp = load_reg(s, rn);
b28b3377
PM
10486 /* the constant is the mask and SYSm fields */
10487 addr = tcg_const_i32(insn & 0xfff);
8984bd2e 10488 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10489 tcg_temp_free_i32(addr);
7d1b0095 10490 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10491 gen_lookup_tb(s);
10492 break;
10493 }
10494 /* fall through */
10495 case 1: /* msr spsr. */
b53d8923 10496 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10497 goto illegal_op;
b53d8923 10498 }
8bfd0550
PM
10499
10500 if (extract32(insn, 5, 1)) {
10501 /* MSR (banked) */
10502 int sysm = extract32(insn, 8, 4) |
10503 (extract32(insn, 4, 1) << 4);
10504 int r = op & 1;
10505
10506 gen_msr_banked(s, r, sysm, rm);
10507 break;
10508 }
10509
10510 /* MSR (for PSRs) */
2fbac54b
FN
10511 tmp = load_reg(s, rn);
10512 if (gen_set_psr(s,
7dcc1f89 10513 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10514 op == 1, tmp))
9ee6e8bb
PB
10515 goto illegal_op;
10516 break;
10517 case 2: /* cps, nop-hint. */
10518 if (((insn >> 8) & 7) == 0) {
10519 gen_nop_hint(s, insn & 0xff);
10520 }
10521 /* Implemented as NOP in user mode. */
10522 if (IS_USER(s))
10523 break;
10524 offset = 0;
10525 imm = 0;
10526 if (insn & (1 << 10)) {
10527 if (insn & (1 << 7))
10528 offset |= CPSR_A;
10529 if (insn & (1 << 6))
10530 offset |= CPSR_I;
10531 if (insn & (1 << 5))
10532 offset |= CPSR_F;
10533 if (insn & (1 << 9))
10534 imm = CPSR_A | CPSR_I | CPSR_F;
10535 }
10536 if (insn & (1 << 8)) {
10537 offset |= 0x1f;
10538 imm |= (insn & 0x1f);
10539 }
10540 if (offset) {
2fbac54b 10541 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10542 }
10543 break;
10544 case 3: /* Special control operations. */
426f5abc 10545 ARCH(7);
9ee6e8bb
PB
10546 op = (insn >> 4) & 0xf;
10547 switch (op) {
10548 case 2: /* clrex */
426f5abc 10549 gen_clrex(s);
9ee6e8bb
PB
10550 break;
10551 case 4: /* dsb */
10552 case 5: /* dmb */
61e4c432 10553 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10554 break;
6df99dec
SS
10555 case 6: /* isb */
10556 /* We need to break the TB after this insn
10557 * to execute self-modifying code correctly
10558 * and also to take any pending interrupts
10559 * immediately.
10560 */
10561 gen_lookup_tb(s);
10562 break;
9ee6e8bb
PB
10563 default:
10564 goto illegal_op;
10565 }
10566 break;
10567 case 4: /* bxj */
9d7c59c8
PM
10568 /* Trivial implementation equivalent to bx.
10569 * This instruction doesn't exist at all for M-profile.
10570 */
10571 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10572 goto illegal_op;
10573 }
d9ba4830
PB
10574 tmp = load_reg(s, rn);
10575 gen_bx(s, tmp);
9ee6e8bb
PB
10576 break;
10577 case 5: /* Exception return. */
b8b45b68
RV
10578 if (IS_USER(s)) {
10579 goto illegal_op;
10580 }
10581 if (rn != 14 || rd != 15) {
10582 goto illegal_op;
10583 }
10584 tmp = load_reg(s, rn);
10585 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10586 gen_exception_return(s, tmp);
10587 break;
8bfd0550 10588 case 6: /* MRS */
43ac6574
PM
10589 if (extract32(insn, 5, 1) &&
10590 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10591 /* MRS (banked) */
10592 int sysm = extract32(insn, 16, 4) |
10593 (extract32(insn, 4, 1) << 4);
10594
10595 gen_mrs_banked(s, 0, sysm, rd);
10596 break;
10597 }
10598
3d54026f
PM
10599 if (extract32(insn, 16, 4) != 0xf) {
10600 goto illegal_op;
10601 }
10602 if (!arm_dc_feature(s, ARM_FEATURE_M) &&
10603 extract32(insn, 0, 8) != 0) {
10604 goto illegal_op;
10605 }
10606
8bfd0550 10607 /* mrs cpsr */
7d1b0095 10608 tmp = tcg_temp_new_i32();
b53d8923 10609 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10610 addr = tcg_const_i32(insn & 0xff);
10611 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10612 tcg_temp_free_i32(addr);
9ee6e8bb 10613 } else {
9ef39277 10614 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10615 }
8984bd2e 10616 store_reg(s, rd, tmp);
9ee6e8bb 10617 break;
8bfd0550 10618 case 7: /* MRS */
43ac6574
PM
10619 if (extract32(insn, 5, 1) &&
10620 !arm_dc_feature(s, ARM_FEATURE_M)) {
8bfd0550
PM
10621 /* MRS (banked) */
10622 int sysm = extract32(insn, 16, 4) |
10623 (extract32(insn, 4, 1) << 4);
10624
10625 gen_mrs_banked(s, 1, sysm, rd);
10626 break;
10627 }
10628
10629 /* mrs spsr. */
9ee6e8bb 10630 /* Not accessible in user mode. */
b53d8923 10631 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10632 goto illegal_op;
b53d8923 10633 }
3d54026f
PM
10634
10635 if (extract32(insn, 16, 4) != 0xf ||
10636 extract32(insn, 0, 8) != 0) {
10637 goto illegal_op;
10638 }
10639
d9ba4830
PB
10640 tmp = load_cpu_field(spsr);
10641 store_reg(s, rd, tmp);
9ee6e8bb 10642 break;
2c0262af
FB
10643 }
10644 }
9ee6e8bb
PB
10645 } else {
10646 /* Conditional branch. */
10647 op = (insn >> 22) & 0xf;
10648 /* Generate a conditional jump to next instruction. */
10649 s->condlabel = gen_new_label();
39fb730a 10650 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10651 s->condjmp = 1;
10652
10653 /* offset[11:1] = insn[10:0] */
10654 offset = (insn & 0x7ff) << 1;
10655 /* offset[17:12] = insn[21:16]. */
10656 offset |= (insn & 0x003f0000) >> 4;
10657 /* offset[31:20] = insn[26]. */
10658 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10659 /* offset[18] = insn[13]. */
10660 offset |= (insn & (1 << 13)) << 5;
10661 /* offset[19] = insn[11]. */
10662 offset |= (insn & (1 << 11)) << 8;
10663
10664 /* jump to the offset */
b0109805 10665 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10666 }
10667 } else {
10668 /* Data processing immediate. */
10669 if (insn & (1 << 25)) {
10670 if (insn & (1 << 24)) {
10671 if (insn & (1 << 20))
10672 goto illegal_op;
10673 /* Bitfield/Saturate. */
10674 op = (insn >> 21) & 7;
10675 imm = insn & 0x1f;
10676 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10677 if (rn == 15) {
7d1b0095 10678 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10679 tcg_gen_movi_i32(tmp, 0);
10680 } else {
10681 tmp = load_reg(s, rn);
10682 }
9ee6e8bb
PB
10683 switch (op) {
10684 case 2: /* Signed bitfield extract. */
10685 imm++;
10686 if (shift + imm > 32)
10687 goto illegal_op;
59a71b4c
RH
10688 if (imm < 32) {
10689 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10690 }
9ee6e8bb
PB
10691 break;
10692 case 6: /* Unsigned bitfield extract. */
10693 imm++;
10694 if (shift + imm > 32)
10695 goto illegal_op;
59a71b4c
RH
10696 if (imm < 32) {
10697 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10698 }
9ee6e8bb
PB
10699 break;
10700 case 3: /* Bitfield insert/clear. */
10701 if (imm < shift)
10702 goto illegal_op;
10703 imm = imm + 1 - shift;
10704 if (imm != 32) {
6ddbc6e4 10705 tmp2 = load_reg(s, rd);
d593c48e 10706 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10707 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10708 }
10709 break;
10710 case 7:
10711 goto illegal_op;
10712 default: /* Saturate. */
9ee6e8bb
PB
10713 if (shift) {
10714 if (op & 1)
6ddbc6e4 10715 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10716 else
6ddbc6e4 10717 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10718 }
6ddbc6e4 10719 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10720 if (op & 4) {
10721 /* Unsigned. */
62b44f05
AR
10722 if ((op & 1) && shift == 0) {
10723 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10724 tcg_temp_free_i32(tmp);
10725 tcg_temp_free_i32(tmp2);
10726 goto illegal_op;
10727 }
9ef39277 10728 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10729 } else {
9ef39277 10730 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10731 }
2c0262af 10732 } else {
9ee6e8bb 10733 /* Signed. */
62b44f05
AR
10734 if ((op & 1) && shift == 0) {
10735 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10736 tcg_temp_free_i32(tmp);
10737 tcg_temp_free_i32(tmp2);
10738 goto illegal_op;
10739 }
9ef39277 10740 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10741 } else {
9ef39277 10742 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10743 }
2c0262af 10744 }
b75263d6 10745 tcg_temp_free_i32(tmp2);
9ee6e8bb 10746 break;
2c0262af 10747 }
6ddbc6e4 10748 store_reg(s, rd, tmp);
9ee6e8bb
PB
10749 } else {
10750 imm = ((insn & 0x04000000) >> 15)
10751 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10752 if (insn & (1 << 22)) {
10753 /* 16-bit immediate. */
10754 imm |= (insn >> 4) & 0xf000;
10755 if (insn & (1 << 23)) {
10756 /* movt */
5e3f878a 10757 tmp = load_reg(s, rd);
86831435 10758 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10759 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10760 } else {
9ee6e8bb 10761 /* movw */
7d1b0095 10762 tmp = tcg_temp_new_i32();
5e3f878a 10763 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10764 }
10765 } else {
9ee6e8bb
PB
10766 /* Add/sub 12-bit immediate. */
10767 if (rn == 15) {
b0109805 10768 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10769 if (insn & (1 << 23))
b0109805 10770 offset -= imm;
9ee6e8bb 10771 else
b0109805 10772 offset += imm;
7d1b0095 10773 tmp = tcg_temp_new_i32();
5e3f878a 10774 tcg_gen_movi_i32(tmp, offset);
2c0262af 10775 } else {
5e3f878a 10776 tmp = load_reg(s, rn);
9ee6e8bb 10777 if (insn & (1 << 23))
5e3f878a 10778 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10779 else
5e3f878a 10780 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10781 }
9ee6e8bb 10782 }
5e3f878a 10783 store_reg(s, rd, tmp);
191abaa2 10784 }
9ee6e8bb
PB
10785 } else {
10786 int shifter_out = 0;
10787 /* modified 12-bit immediate. */
10788 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10789 imm = (insn & 0xff);
10790 switch (shift) {
10791 case 0: /* XY */
10792 /* Nothing to do. */
10793 break;
10794 case 1: /* 00XY00XY */
10795 imm |= imm << 16;
10796 break;
10797 case 2: /* XY00XY00 */
10798 imm |= imm << 16;
10799 imm <<= 8;
10800 break;
10801 case 3: /* XYXYXYXY */
10802 imm |= imm << 16;
10803 imm |= imm << 8;
10804 break;
10805 default: /* Rotated constant. */
10806 shift = (shift << 1) | (imm >> 7);
10807 imm |= 0x80;
10808 imm = imm << (32 - shift);
10809 shifter_out = 1;
10810 break;
b5ff1b31 10811 }
7d1b0095 10812 tmp2 = tcg_temp_new_i32();
3174f8e9 10813 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10814 rn = (insn >> 16) & 0xf;
3174f8e9 10815 if (rn == 15) {
7d1b0095 10816 tmp = tcg_temp_new_i32();
3174f8e9
FN
10817 tcg_gen_movi_i32(tmp, 0);
10818 } else {
10819 tmp = load_reg(s, rn);
10820 }
9ee6e8bb
PB
10821 op = (insn >> 21) & 0xf;
10822 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10823 shifter_out, tmp, tmp2))
9ee6e8bb 10824 goto illegal_op;
7d1b0095 10825 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10826 rd = (insn >> 8) & 0xf;
10827 if (rd != 15) {
3174f8e9
FN
10828 store_reg(s, rd, tmp);
10829 } else {
7d1b0095 10830 tcg_temp_free_i32(tmp);
2c0262af 10831 }
2c0262af 10832 }
9ee6e8bb
PB
10833 }
10834 break;
10835 case 12: /* Load/store single data item. */
10836 {
10837 int postinc = 0;
10838 int writeback = 0;
a99caa48 10839 int memidx;
9bb6558a
PM
10840 ISSInfo issinfo;
10841
9ee6e8bb 10842 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10843 if (disas_neon_ls_insn(s, insn)) {
c1713132 10844 goto illegal_op;
7dcc1f89 10845 }
9ee6e8bb
PB
10846 break;
10847 }
a2fdc890
PM
10848 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10849 if (rs == 15) {
10850 if (!(insn & (1 << 20))) {
10851 goto illegal_op;
10852 }
10853 if (op != 2) {
10854 /* Byte or halfword load space with dest == r15 : memory hints.
10855 * Catch them early so we don't emit pointless addressing code.
10856 * This space is a mix of:
10857 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10858 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10859 * cores)
10860 * unallocated hints, which must be treated as NOPs
10861 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10862 * which is easiest for the decoding logic
10863 * Some space which must UNDEF
10864 */
10865 int op1 = (insn >> 23) & 3;
10866 int op2 = (insn >> 6) & 0x3f;
10867 if (op & 2) {
10868 goto illegal_op;
10869 }
10870 if (rn == 15) {
02afbf64
PM
10871 /* UNPREDICTABLE, unallocated hint or
10872 * PLD/PLDW/PLI (literal)
10873 */
a2fdc890
PM
10874 return 0;
10875 }
10876 if (op1 & 1) {
02afbf64 10877 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10878 }
10879 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10880 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10881 }
10882 /* UNDEF space, or an UNPREDICTABLE */
10883 return 1;
10884 }
10885 }
a99caa48 10886 memidx = get_mem_index(s);
9ee6e8bb 10887 if (rn == 15) {
7d1b0095 10888 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10889 /* PC relative. */
10890 /* s->pc has already been incremented by 4. */
10891 imm = s->pc & 0xfffffffc;
10892 if (insn & (1 << 23))
10893 imm += insn & 0xfff;
10894 else
10895 imm -= insn & 0xfff;
b0109805 10896 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10897 } else {
b0109805 10898 addr = load_reg(s, rn);
9ee6e8bb
PB
10899 if (insn & (1 << 23)) {
10900 /* Positive offset. */
10901 imm = insn & 0xfff;
b0109805 10902 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10903 } else {
9ee6e8bb 10904 imm = insn & 0xff;
2a0308c5
PM
10905 switch ((insn >> 8) & 0xf) {
10906 case 0x0: /* Shifted Register. */
9ee6e8bb 10907 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10908 if (shift > 3) {
10909 tcg_temp_free_i32(addr);
18c9b560 10910 goto illegal_op;
2a0308c5 10911 }
b26eefb6 10912 tmp = load_reg(s, rm);
9ee6e8bb 10913 if (shift)
b26eefb6 10914 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10915 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10916 tcg_temp_free_i32(tmp);
9ee6e8bb 10917 break;
2a0308c5 10918 case 0xc: /* Negative offset. */
b0109805 10919 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10920 break;
2a0308c5 10921 case 0xe: /* User privilege. */
b0109805 10922 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10923 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10924 break;
2a0308c5 10925 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10926 imm = -imm;
10927 /* Fall through. */
2a0308c5 10928 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10929 postinc = 1;
10930 writeback = 1;
10931 break;
2a0308c5 10932 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10933 imm = -imm;
10934 /* Fall through. */
2a0308c5 10935 case 0xf: /* Pre-increment. */
b0109805 10936 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10937 writeback = 1;
10938 break;
10939 default:
2a0308c5 10940 tcg_temp_free_i32(addr);
b7bcbe95 10941 goto illegal_op;
9ee6e8bb
PB
10942 }
10943 }
10944 }
9bb6558a
PM
10945
10946 issinfo = writeback ? ISSInvalid : rs;
10947
9ee6e8bb
PB
10948 if (insn & (1 << 20)) {
10949 /* Load. */
5a839c0d 10950 tmp = tcg_temp_new_i32();
a2fdc890 10951 switch (op) {
5a839c0d 10952 case 0:
9bb6558a 10953 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10954 break;
10955 case 4:
9bb6558a 10956 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10957 break;
10958 case 1:
9bb6558a 10959 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10960 break;
10961 case 5:
9bb6558a 10962 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10963 break;
10964 case 2:
9bb6558a 10965 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10966 break;
2a0308c5 10967 default:
5a839c0d 10968 tcg_temp_free_i32(tmp);
2a0308c5
PM
10969 tcg_temp_free_i32(addr);
10970 goto illegal_op;
a2fdc890
PM
10971 }
10972 if (rs == 15) {
3bb8a96f 10973 gen_bx_excret(s, tmp);
9ee6e8bb 10974 } else {
a2fdc890 10975 store_reg(s, rs, tmp);
9ee6e8bb
PB
10976 }
10977 } else {
10978 /* Store. */
b0109805 10979 tmp = load_reg(s, rs);
9ee6e8bb 10980 switch (op) {
5a839c0d 10981 case 0:
9bb6558a 10982 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10983 break;
10984 case 1:
9bb6558a 10985 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10986 break;
10987 case 2:
9bb6558a 10988 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10989 break;
2a0308c5 10990 default:
5a839c0d 10991 tcg_temp_free_i32(tmp);
2a0308c5
PM
10992 tcg_temp_free_i32(addr);
10993 goto illegal_op;
b7bcbe95 10994 }
5a839c0d 10995 tcg_temp_free_i32(tmp);
2c0262af 10996 }
9ee6e8bb 10997 if (postinc)
b0109805
PB
10998 tcg_gen_addi_i32(addr, addr, imm);
10999 if (writeback) {
11000 store_reg(s, rn, addr);
11001 } else {
7d1b0095 11002 tcg_temp_free_i32(addr);
b0109805 11003 }
9ee6e8bb
PB
11004 }
11005 break;
11006 default:
11007 goto illegal_op;
2c0262af 11008 }
9ee6e8bb
PB
11009 return 0;
11010illegal_op:
11011 return 1;
2c0262af
FB
11012}
11013
0ecb72a5 11014static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
11015{
11016 uint32_t val, insn, op, rm, rn, rd, shift, cond;
11017 int32_t offset;
11018 int i;
39d5492a
PM
11019 TCGv_i32 tmp;
11020 TCGv_i32 tmp2;
11021 TCGv_i32 addr;
99c475ab 11022
9ee6e8bb
PB
11023 if (s->condexec_mask) {
11024 cond = s->condexec_cond;
bedd2912
JB
11025 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
11026 s->condlabel = gen_new_label();
39fb730a 11027 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
11028 s->condjmp = 1;
11029 }
9ee6e8bb
PB
11030 }
11031
f9fd40eb 11032 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 11033 s->pc += 2;
b5ff1b31 11034
99c475ab
FB
11035 switch (insn >> 12) {
11036 case 0: case 1:
396e467c 11037
99c475ab
FB
11038 rd = insn & 7;
11039 op = (insn >> 11) & 3;
11040 if (op == 3) {
11041 /* add/subtract */
11042 rn = (insn >> 3) & 7;
396e467c 11043 tmp = load_reg(s, rn);
99c475ab
FB
11044 if (insn & (1 << 10)) {
11045 /* immediate */
7d1b0095 11046 tmp2 = tcg_temp_new_i32();
396e467c 11047 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
11048 } else {
11049 /* reg */
11050 rm = (insn >> 6) & 7;
396e467c 11051 tmp2 = load_reg(s, rm);
99c475ab 11052 }
9ee6e8bb
PB
11053 if (insn & (1 << 9)) {
11054 if (s->condexec_mask)
396e467c 11055 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 11056 else
72485ec4 11057 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
11058 } else {
11059 if (s->condexec_mask)
396e467c 11060 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 11061 else
72485ec4 11062 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 11063 }
7d1b0095 11064 tcg_temp_free_i32(tmp2);
396e467c 11065 store_reg(s, rd, tmp);
99c475ab
FB
11066 } else {
11067 /* shift immediate */
11068 rm = (insn >> 3) & 7;
11069 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
11070 tmp = load_reg(s, rm);
11071 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
11072 if (!s->condexec_mask)
11073 gen_logic_CC(tmp);
11074 store_reg(s, rd, tmp);
99c475ab
FB
11075 }
11076 break;
11077 case 2: case 3:
11078 /* arithmetic large immediate */
11079 op = (insn >> 11) & 3;
11080 rd = (insn >> 8) & 0x7;
396e467c 11081 if (op == 0) { /* mov */
7d1b0095 11082 tmp = tcg_temp_new_i32();
396e467c 11083 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 11084 if (!s->condexec_mask)
396e467c
FN
11085 gen_logic_CC(tmp);
11086 store_reg(s, rd, tmp);
11087 } else {
11088 tmp = load_reg(s, rd);
7d1b0095 11089 tmp2 = tcg_temp_new_i32();
396e467c
FN
11090 tcg_gen_movi_i32(tmp2, insn & 0xff);
11091 switch (op) {
11092 case 1: /* cmp */
72485ec4 11093 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11094 tcg_temp_free_i32(tmp);
11095 tcg_temp_free_i32(tmp2);
396e467c
FN
11096 break;
11097 case 2: /* add */
11098 if (s->condexec_mask)
11099 tcg_gen_add_i32(tmp, tmp, tmp2);
11100 else
72485ec4 11101 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 11102 tcg_temp_free_i32(tmp2);
396e467c
FN
11103 store_reg(s, rd, tmp);
11104 break;
11105 case 3: /* sub */
11106 if (s->condexec_mask)
11107 tcg_gen_sub_i32(tmp, tmp, tmp2);
11108 else
72485ec4 11109 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11110 tcg_temp_free_i32(tmp2);
396e467c
FN
11111 store_reg(s, rd, tmp);
11112 break;
11113 }
99c475ab 11114 }
99c475ab
FB
11115 break;
11116 case 4:
11117 if (insn & (1 << 11)) {
11118 rd = (insn >> 8) & 7;
5899f386
FB
11119 /* load pc-relative. Bit 1 of PC is ignored. */
11120 val = s->pc + 2 + ((insn & 0xff) * 4);
11121 val &= ~(uint32_t)2;
7d1b0095 11122 addr = tcg_temp_new_i32();
b0109805 11123 tcg_gen_movi_i32(addr, val);
c40c8556 11124 tmp = tcg_temp_new_i32();
9bb6558a
PM
11125 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11126 rd | ISSIs16Bit);
7d1b0095 11127 tcg_temp_free_i32(addr);
b0109805 11128 store_reg(s, rd, tmp);
99c475ab
FB
11129 break;
11130 }
11131 if (insn & (1 << 10)) {
11132 /* data processing extended or blx */
11133 rd = (insn & 7) | ((insn >> 4) & 8);
11134 rm = (insn >> 3) & 0xf;
11135 op = (insn >> 8) & 3;
11136 switch (op) {
11137 case 0: /* add */
396e467c
FN
11138 tmp = load_reg(s, rd);
11139 tmp2 = load_reg(s, rm);
11140 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11141 tcg_temp_free_i32(tmp2);
396e467c 11142 store_reg(s, rd, tmp);
99c475ab
FB
11143 break;
11144 case 1: /* cmp */
396e467c
FN
11145 tmp = load_reg(s, rd);
11146 tmp2 = load_reg(s, rm);
72485ec4 11147 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11148 tcg_temp_free_i32(tmp2);
11149 tcg_temp_free_i32(tmp);
99c475ab
FB
11150 break;
11151 case 2: /* mov/cpy */
396e467c
FN
11152 tmp = load_reg(s, rm);
11153 store_reg(s, rd, tmp);
99c475ab
FB
11154 break;
11155 case 3:/* branch [and link] exchange thumb register */
b0109805 11156 tmp = load_reg(s, rm);
99c475ab 11157 if (insn & (1 << 7)) {
be5e7a76 11158 ARCH(5);
99c475ab 11159 val = (uint32_t)s->pc | 1;
7d1b0095 11160 tmp2 = tcg_temp_new_i32();
b0109805
PB
11161 tcg_gen_movi_i32(tmp2, val);
11162 store_reg(s, 14, tmp2);
3bb8a96f
PM
11163 gen_bx(s, tmp);
11164 } else {
11165 /* Only BX works as exception-return, not BLX */
11166 gen_bx_excret(s, tmp);
99c475ab 11167 }
99c475ab
FB
11168 break;
11169 }
11170 break;
11171 }
11172
11173 /* data processing register */
11174 rd = insn & 7;
11175 rm = (insn >> 3) & 7;
11176 op = (insn >> 6) & 0xf;
11177 if (op == 2 || op == 3 || op == 4 || op == 7) {
11178 /* the shift/rotate ops want the operands backwards */
11179 val = rm;
11180 rm = rd;
11181 rd = val;
11182 val = 1;
11183 } else {
11184 val = 0;
11185 }
11186
396e467c 11187 if (op == 9) { /* neg */
7d1b0095 11188 tmp = tcg_temp_new_i32();
396e467c
FN
11189 tcg_gen_movi_i32(tmp, 0);
11190 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11191 tmp = load_reg(s, rd);
11192 } else {
39d5492a 11193 TCGV_UNUSED_I32(tmp);
396e467c 11194 }
99c475ab 11195
396e467c 11196 tmp2 = load_reg(s, rm);
5899f386 11197 switch (op) {
99c475ab 11198 case 0x0: /* and */
396e467c 11199 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11200 if (!s->condexec_mask)
396e467c 11201 gen_logic_CC(tmp);
99c475ab
FB
11202 break;
11203 case 0x1: /* eor */
396e467c 11204 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11205 if (!s->condexec_mask)
396e467c 11206 gen_logic_CC(tmp);
99c475ab
FB
11207 break;
11208 case 0x2: /* lsl */
9ee6e8bb 11209 if (s->condexec_mask) {
365af80e 11210 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11211 } else {
9ef39277 11212 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11213 gen_logic_CC(tmp2);
9ee6e8bb 11214 }
99c475ab
FB
11215 break;
11216 case 0x3: /* lsr */
9ee6e8bb 11217 if (s->condexec_mask) {
365af80e 11218 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11219 } else {
9ef39277 11220 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11221 gen_logic_CC(tmp2);
9ee6e8bb 11222 }
99c475ab
FB
11223 break;
11224 case 0x4: /* asr */
9ee6e8bb 11225 if (s->condexec_mask) {
365af80e 11226 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11227 } else {
9ef39277 11228 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11229 gen_logic_CC(tmp2);
9ee6e8bb 11230 }
99c475ab
FB
11231 break;
11232 case 0x5: /* adc */
49b4c31e 11233 if (s->condexec_mask) {
396e467c 11234 gen_adc(tmp, tmp2);
49b4c31e
RH
11235 } else {
11236 gen_adc_CC(tmp, tmp, tmp2);
11237 }
99c475ab
FB
11238 break;
11239 case 0x6: /* sbc */
2de68a49 11240 if (s->condexec_mask) {
396e467c 11241 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11242 } else {
11243 gen_sbc_CC(tmp, tmp, tmp2);
11244 }
99c475ab
FB
11245 break;
11246 case 0x7: /* ror */
9ee6e8bb 11247 if (s->condexec_mask) {
f669df27
AJ
11248 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11249 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11250 } else {
9ef39277 11251 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11252 gen_logic_CC(tmp2);
9ee6e8bb 11253 }
99c475ab
FB
11254 break;
11255 case 0x8: /* tst */
396e467c
FN
11256 tcg_gen_and_i32(tmp, tmp, tmp2);
11257 gen_logic_CC(tmp);
99c475ab 11258 rd = 16;
5899f386 11259 break;
99c475ab 11260 case 0x9: /* neg */
9ee6e8bb 11261 if (s->condexec_mask)
396e467c 11262 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11263 else
72485ec4 11264 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11265 break;
11266 case 0xa: /* cmp */
72485ec4 11267 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11268 rd = 16;
11269 break;
11270 case 0xb: /* cmn */
72485ec4 11271 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11272 rd = 16;
11273 break;
11274 case 0xc: /* orr */
396e467c 11275 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11276 if (!s->condexec_mask)
396e467c 11277 gen_logic_CC(tmp);
99c475ab
FB
11278 break;
11279 case 0xd: /* mul */
7b2919a0 11280 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11281 if (!s->condexec_mask)
396e467c 11282 gen_logic_CC(tmp);
99c475ab
FB
11283 break;
11284 case 0xe: /* bic */
f669df27 11285 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11286 if (!s->condexec_mask)
396e467c 11287 gen_logic_CC(tmp);
99c475ab
FB
11288 break;
11289 case 0xf: /* mvn */
396e467c 11290 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11291 if (!s->condexec_mask)
396e467c 11292 gen_logic_CC(tmp2);
99c475ab 11293 val = 1;
5899f386 11294 rm = rd;
99c475ab
FB
11295 break;
11296 }
11297 if (rd != 16) {
396e467c
FN
11298 if (val) {
11299 store_reg(s, rm, tmp2);
11300 if (op != 0xf)
7d1b0095 11301 tcg_temp_free_i32(tmp);
396e467c
FN
11302 } else {
11303 store_reg(s, rd, tmp);
7d1b0095 11304 tcg_temp_free_i32(tmp2);
396e467c
FN
11305 }
11306 } else {
7d1b0095
PM
11307 tcg_temp_free_i32(tmp);
11308 tcg_temp_free_i32(tmp2);
99c475ab
FB
11309 }
11310 break;
11311
11312 case 5:
11313 /* load/store register offset. */
11314 rd = insn & 7;
11315 rn = (insn >> 3) & 7;
11316 rm = (insn >> 6) & 7;
11317 op = (insn >> 9) & 7;
b0109805 11318 addr = load_reg(s, rn);
b26eefb6 11319 tmp = load_reg(s, rm);
b0109805 11320 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11321 tcg_temp_free_i32(tmp);
99c475ab 11322
c40c8556 11323 if (op < 3) { /* store */
b0109805 11324 tmp = load_reg(s, rd);
c40c8556
PM
11325 } else {
11326 tmp = tcg_temp_new_i32();
11327 }
99c475ab
FB
11328
11329 switch (op) {
11330 case 0: /* str */
9bb6558a 11331 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11332 break;
11333 case 1: /* strh */
9bb6558a 11334 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11335 break;
11336 case 2: /* strb */
9bb6558a 11337 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11338 break;
11339 case 3: /* ldrsb */
9bb6558a 11340 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11341 break;
11342 case 4: /* ldr */
9bb6558a 11343 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11344 break;
11345 case 5: /* ldrh */
9bb6558a 11346 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11347 break;
11348 case 6: /* ldrb */
9bb6558a 11349 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11350 break;
11351 case 7: /* ldrsh */
9bb6558a 11352 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11353 break;
11354 }
c40c8556 11355 if (op >= 3) { /* load */
b0109805 11356 store_reg(s, rd, tmp);
c40c8556
PM
11357 } else {
11358 tcg_temp_free_i32(tmp);
11359 }
7d1b0095 11360 tcg_temp_free_i32(addr);
99c475ab
FB
11361 break;
11362
11363 case 6:
11364 /* load/store word immediate offset */
11365 rd = insn & 7;
11366 rn = (insn >> 3) & 7;
b0109805 11367 addr = load_reg(s, rn);
99c475ab 11368 val = (insn >> 4) & 0x7c;
b0109805 11369 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11370
11371 if (insn & (1 << 11)) {
11372 /* load */
c40c8556 11373 tmp = tcg_temp_new_i32();
12dcc321 11374 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11375 store_reg(s, rd, tmp);
99c475ab
FB
11376 } else {
11377 /* store */
b0109805 11378 tmp = load_reg(s, rd);
12dcc321 11379 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11380 tcg_temp_free_i32(tmp);
99c475ab 11381 }
7d1b0095 11382 tcg_temp_free_i32(addr);
99c475ab
FB
11383 break;
11384
11385 case 7:
11386 /* load/store byte immediate offset */
11387 rd = insn & 7;
11388 rn = (insn >> 3) & 7;
b0109805 11389 addr = load_reg(s, rn);
99c475ab 11390 val = (insn >> 6) & 0x1f;
b0109805 11391 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11392
11393 if (insn & (1 << 11)) {
11394 /* load */
c40c8556 11395 tmp = tcg_temp_new_i32();
9bb6558a 11396 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11397 store_reg(s, rd, tmp);
99c475ab
FB
11398 } else {
11399 /* store */
b0109805 11400 tmp = load_reg(s, rd);
9bb6558a 11401 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11402 tcg_temp_free_i32(tmp);
99c475ab 11403 }
7d1b0095 11404 tcg_temp_free_i32(addr);
99c475ab
FB
11405 break;
11406
11407 case 8:
11408 /* load/store halfword immediate offset */
11409 rd = insn & 7;
11410 rn = (insn >> 3) & 7;
b0109805 11411 addr = load_reg(s, rn);
99c475ab 11412 val = (insn >> 5) & 0x3e;
b0109805 11413 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11414
11415 if (insn & (1 << 11)) {
11416 /* load */
c40c8556 11417 tmp = tcg_temp_new_i32();
9bb6558a 11418 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11419 store_reg(s, rd, tmp);
99c475ab
FB
11420 } else {
11421 /* store */
b0109805 11422 tmp = load_reg(s, rd);
9bb6558a 11423 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11424 tcg_temp_free_i32(tmp);
99c475ab 11425 }
7d1b0095 11426 tcg_temp_free_i32(addr);
99c475ab
FB
11427 break;
11428
11429 case 9:
11430 /* load/store from stack */
11431 rd = (insn >> 8) & 7;
b0109805 11432 addr = load_reg(s, 13);
99c475ab 11433 val = (insn & 0xff) * 4;
b0109805 11434 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11435
11436 if (insn & (1 << 11)) {
11437 /* load */
c40c8556 11438 tmp = tcg_temp_new_i32();
9bb6558a 11439 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11440 store_reg(s, rd, tmp);
99c475ab
FB
11441 } else {
11442 /* store */
b0109805 11443 tmp = load_reg(s, rd);
9bb6558a 11444 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11445 tcg_temp_free_i32(tmp);
99c475ab 11446 }
7d1b0095 11447 tcg_temp_free_i32(addr);
99c475ab
FB
11448 break;
11449
11450 case 10:
11451 /* add to high reg */
11452 rd = (insn >> 8) & 7;
5899f386
FB
11453 if (insn & (1 << 11)) {
11454 /* SP */
5e3f878a 11455 tmp = load_reg(s, 13);
5899f386
FB
11456 } else {
11457 /* PC. bit 1 is ignored. */
7d1b0095 11458 tmp = tcg_temp_new_i32();
5e3f878a 11459 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11460 }
99c475ab 11461 val = (insn & 0xff) * 4;
5e3f878a
PB
11462 tcg_gen_addi_i32(tmp, tmp, val);
11463 store_reg(s, rd, tmp);
99c475ab
FB
11464 break;
11465
11466 case 11:
11467 /* misc */
11468 op = (insn >> 8) & 0xf;
11469 switch (op) {
11470 case 0:
11471 /* adjust stack pointer */
b26eefb6 11472 tmp = load_reg(s, 13);
99c475ab
FB
11473 val = (insn & 0x7f) * 4;
11474 if (insn & (1 << 7))
6a0d8a1d 11475 val = -(int32_t)val;
b26eefb6
PB
11476 tcg_gen_addi_i32(tmp, tmp, val);
11477 store_reg(s, 13, tmp);
99c475ab
FB
11478 break;
11479
9ee6e8bb
PB
11480 case 2: /* sign/zero extend. */
11481 ARCH(6);
11482 rd = insn & 7;
11483 rm = (insn >> 3) & 7;
b0109805 11484 tmp = load_reg(s, rm);
9ee6e8bb 11485 switch ((insn >> 6) & 3) {
b0109805
PB
11486 case 0: gen_sxth(tmp); break;
11487 case 1: gen_sxtb(tmp); break;
11488 case 2: gen_uxth(tmp); break;
11489 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11490 }
b0109805 11491 store_reg(s, rd, tmp);
9ee6e8bb 11492 break;
99c475ab
FB
11493 case 4: case 5: case 0xc: case 0xd:
11494 /* push/pop */
b0109805 11495 addr = load_reg(s, 13);
5899f386
FB
11496 if (insn & (1 << 8))
11497 offset = 4;
99c475ab 11498 else
5899f386
FB
11499 offset = 0;
11500 for (i = 0; i < 8; i++) {
11501 if (insn & (1 << i))
11502 offset += 4;
11503 }
11504 if ((insn & (1 << 11)) == 0) {
b0109805 11505 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11506 }
99c475ab
FB
11507 for (i = 0; i < 8; i++) {
11508 if (insn & (1 << i)) {
11509 if (insn & (1 << 11)) {
11510 /* pop */
c40c8556 11511 tmp = tcg_temp_new_i32();
12dcc321 11512 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11513 store_reg(s, i, tmp);
99c475ab
FB
11514 } else {
11515 /* push */
b0109805 11516 tmp = load_reg(s, i);
12dcc321 11517 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11518 tcg_temp_free_i32(tmp);
99c475ab 11519 }
5899f386 11520 /* advance to the next address. */
b0109805 11521 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11522 }
11523 }
39d5492a 11524 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11525 if (insn & (1 << 8)) {
11526 if (insn & (1 << 11)) {
11527 /* pop pc */
c40c8556 11528 tmp = tcg_temp_new_i32();
12dcc321 11529 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11530 /* don't set the pc until the rest of the instruction
11531 has completed */
11532 } else {
11533 /* push lr */
b0109805 11534 tmp = load_reg(s, 14);
12dcc321 11535 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11536 tcg_temp_free_i32(tmp);
99c475ab 11537 }
b0109805 11538 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11539 }
5899f386 11540 if ((insn & (1 << 11)) == 0) {
b0109805 11541 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11542 }
99c475ab 11543 /* write back the new stack pointer */
b0109805 11544 store_reg(s, 13, addr);
99c475ab 11545 /* set the new PC value */
be5e7a76 11546 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11547 store_reg_from_load(s, 15, tmp);
be5e7a76 11548 }
99c475ab
FB
11549 break;
11550
9ee6e8bb
PB
11551 case 1: case 3: case 9: case 11: /* czb */
11552 rm = insn & 7;
d9ba4830 11553 tmp = load_reg(s, rm);
9ee6e8bb
PB
11554 s->condlabel = gen_new_label();
11555 s->condjmp = 1;
11556 if (insn & (1 << 11))
cb63669a 11557 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11558 else
cb63669a 11559 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11560 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11561 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11562 val = (uint32_t)s->pc + 2;
11563 val += offset;
11564 gen_jmp(s, val);
11565 break;
11566
11567 case 15: /* IT, nop-hint. */
11568 if ((insn & 0xf) == 0) {
11569 gen_nop_hint(s, (insn >> 4) & 0xf);
11570 break;
11571 }
11572 /* If Then. */
11573 s->condexec_cond = (insn >> 4) & 0xe;
11574 s->condexec_mask = insn & 0x1f;
11575 /* No actual code generated for this insn, just setup state. */
11576 break;
11577
06c949e6 11578 case 0xe: /* bkpt */
d4a2dc67
PM
11579 {
11580 int imm8 = extract32(insn, 0, 8);
be5e7a76 11581 ARCH(5);
73710361
GB
11582 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11583 default_exception_el(s));
06c949e6 11584 break;
d4a2dc67 11585 }
06c949e6 11586
19a6e31c
PM
11587 case 0xa: /* rev, and hlt */
11588 {
11589 int op1 = extract32(insn, 6, 2);
11590
11591 if (op1 == 2) {
11592 /* HLT */
11593 int imm6 = extract32(insn, 0, 6);
11594
11595 gen_hlt(s, imm6);
11596 break;
11597 }
11598
11599 /* Otherwise this is rev */
9ee6e8bb
PB
11600 ARCH(6);
11601 rn = (insn >> 3) & 0x7;
11602 rd = insn & 0x7;
b0109805 11603 tmp = load_reg(s, rn);
19a6e31c 11604 switch (op1) {
66896cb8 11605 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11606 case 1: gen_rev16(tmp); break;
11607 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11608 default:
11609 g_assert_not_reached();
9ee6e8bb 11610 }
b0109805 11611 store_reg(s, rd, tmp);
9ee6e8bb 11612 break;
19a6e31c 11613 }
9ee6e8bb 11614
d9e028c1
PM
11615 case 6:
11616 switch ((insn >> 5) & 7) {
11617 case 2:
11618 /* setend */
11619 ARCH(6);
9886ecdf
PB
11620 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11621 gen_helper_setend(cpu_env);
11622 s->is_jmp = DISAS_UPDATE;
d9e028c1 11623 }
9ee6e8bb 11624 break;
d9e028c1
PM
11625 case 3:
11626 /* cps */
11627 ARCH(6);
11628 if (IS_USER(s)) {
11629 break;
8984bd2e 11630 }
b53d8923 11631 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11632 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11633 /* FAULTMASK */
11634 if (insn & 1) {
11635 addr = tcg_const_i32(19);
11636 gen_helper_v7m_msr(cpu_env, addr, tmp);
11637 tcg_temp_free_i32(addr);
11638 }
11639 /* PRIMASK */
11640 if (insn & 2) {
11641 addr = tcg_const_i32(16);
11642 gen_helper_v7m_msr(cpu_env, addr, tmp);
11643 tcg_temp_free_i32(addr);
11644 }
11645 tcg_temp_free_i32(tmp);
11646 gen_lookup_tb(s);
11647 } else {
11648 if (insn & (1 << 4)) {
11649 shift = CPSR_A | CPSR_I | CPSR_F;
11650 } else {
11651 shift = 0;
11652 }
11653 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11654 }
d9e028c1
PM
11655 break;
11656 default:
11657 goto undef;
9ee6e8bb
PB
11658 }
11659 break;
11660
99c475ab
FB
11661 default:
11662 goto undef;
11663 }
11664 break;
11665
11666 case 12:
a7d3970d 11667 {
99c475ab 11668 /* load/store multiple */
39d5492a
PM
11669 TCGv_i32 loaded_var;
11670 TCGV_UNUSED_I32(loaded_var);
99c475ab 11671 rn = (insn >> 8) & 0x7;
b0109805 11672 addr = load_reg(s, rn);
99c475ab
FB
11673 for (i = 0; i < 8; i++) {
11674 if (insn & (1 << i)) {
99c475ab
FB
11675 if (insn & (1 << 11)) {
11676 /* load */
c40c8556 11677 tmp = tcg_temp_new_i32();
12dcc321 11678 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11679 if (i == rn) {
11680 loaded_var = tmp;
11681 } else {
11682 store_reg(s, i, tmp);
11683 }
99c475ab
FB
11684 } else {
11685 /* store */
b0109805 11686 tmp = load_reg(s, i);
12dcc321 11687 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11688 tcg_temp_free_i32(tmp);
99c475ab 11689 }
5899f386 11690 /* advance to the next address */
b0109805 11691 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11692 }
11693 }
b0109805 11694 if ((insn & (1 << rn)) == 0) {
a7d3970d 11695 /* base reg not in list: base register writeback */
b0109805
PB
11696 store_reg(s, rn, addr);
11697 } else {
a7d3970d
PM
11698 /* base reg in list: if load, complete it now */
11699 if (insn & (1 << 11)) {
11700 store_reg(s, rn, loaded_var);
11701 }
7d1b0095 11702 tcg_temp_free_i32(addr);
b0109805 11703 }
99c475ab 11704 break;
a7d3970d 11705 }
99c475ab
FB
11706 case 13:
11707 /* conditional branch or swi */
11708 cond = (insn >> 8) & 0xf;
11709 if (cond == 0xe)
11710 goto undef;
11711
11712 if (cond == 0xf) {
11713 /* swi */
eaed129d 11714 gen_set_pc_im(s, s->pc);
d4a2dc67 11715 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11716 s->is_jmp = DISAS_SWI;
99c475ab
FB
11717 break;
11718 }
11719 /* generate a conditional jump to next instruction */
e50e6a20 11720 s->condlabel = gen_new_label();
39fb730a 11721 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11722 s->condjmp = 1;
99c475ab
FB
11723
11724 /* jump to the offset */
5899f386 11725 val = (uint32_t)s->pc + 2;
99c475ab 11726 offset = ((int32_t)insn << 24) >> 24;
5899f386 11727 val += offset << 1;
8aaca4c0 11728 gen_jmp(s, val);
99c475ab
FB
11729 break;
11730
11731 case 14:
358bf29e 11732 if (insn & (1 << 11)) {
9ee6e8bb
PB
11733 if (disas_thumb2_insn(env, s, insn))
11734 goto undef32;
358bf29e
PB
11735 break;
11736 }
9ee6e8bb 11737 /* unconditional branch */
99c475ab
FB
11738 val = (uint32_t)s->pc;
11739 offset = ((int32_t)insn << 21) >> 21;
11740 val += (offset << 1) + 2;
8aaca4c0 11741 gen_jmp(s, val);
99c475ab
FB
11742 break;
11743
11744 case 15:
9ee6e8bb 11745 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11746 goto undef32;
9ee6e8bb 11747 break;
99c475ab
FB
11748 }
11749 return;
9ee6e8bb 11750undef32:
73710361
GB
11751 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11752 default_exception_el(s));
9ee6e8bb
PB
11753 return;
11754illegal_op:
99c475ab 11755undef:
73710361
GB
11756 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11757 default_exception_el(s));
99c475ab
FB
11758}
11759
541ebcd4
PM
11760static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11761{
11762 /* Return true if the insn at dc->pc might cross a page boundary.
11763 * (False positives are OK, false negatives are not.)
11764 */
11765 uint16_t insn;
11766
11767 if ((s->pc & 3) == 0) {
11768 /* At a 4-aligned address we can't be crossing a page */
11769 return false;
11770 }
11771
11772 /* This must be a Thumb insn */
f9fd40eb 11773 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11774
11775 if ((insn >> 11) >= 0x1d) {
11776 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11777 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11778 * end up actually treating this as two 16-bit insns (see the
11779 * code at the start of disas_thumb2_insn()) but we don't bother
11780 * to check for that as it is unlikely, and false positives here
11781 * are harmless.
11782 */
11783 return true;
11784 }
11785 /* Definitely a 16-bit insn, can't be crossing a page. */
11786 return false;
11787}
11788
20157705 11789/* generate intermediate code for basic block 'tb'. */
4e5e1215 11790void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11791{
4e5e1215 11792 ARMCPU *cpu = arm_env_get_cpu(env);
ed2803da 11793 CPUState *cs = CPU(cpu);
2c0262af 11794 DisasContext dc1, *dc = &dc1;
0fa85d43 11795 target_ulong pc_start;
0a2461fa 11796 target_ulong next_page_start;
2e70f6ef
PB
11797 int num_insns;
11798 int max_insns;
541ebcd4 11799 bool end_of_page;
3b46e624 11800
2c0262af 11801 /* generate intermediate code */
40f860cd
PM
11802
11803 /* The A64 decoder has its own top level loop, because it doesn't need
11804 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11805 */
11806 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
4e5e1215 11807 gen_intermediate_code_a64(cpu, tb);
40f860cd
PM
11808 return;
11809 }
11810
0fa85d43 11811 pc_start = tb->pc;
3b46e624 11812
2c0262af
FB
11813 dc->tb = tb;
11814
2c0262af
FB
11815 dc->is_jmp = DISAS_NEXT;
11816 dc->pc = pc_start;
ed2803da 11817 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 11818 dc->condjmp = 0;
3926cc84 11819
40f860cd 11820 dc->aarch64 = 0;
cef9ee70
SS
11821 /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
11822 * there is no secure EL1, so we route exceptions to EL3.
11823 */
11824 dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
11825 !arm_el_is_aa64(env, 3);
40f860cd 11826 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
f9fd40eb 11827 dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
91cca2cd 11828 dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
40f860cd
PM
11829 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11830 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
8bd5c820 11831 dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
c1e37810 11832 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11833#if !defined(CONFIG_USER_ONLY)
c1e37810 11834 dc->user = (dc->current_el == 0);
3926cc84 11835#endif
3f342b9e 11836 dc->ns = ARM_TBFLAG_NS(tb->flags);
9dbbc748 11837 dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
40f860cd
PM
11838 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11839 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11840 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
c0f4af17 11841 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
064c379c 11842 dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(tb->flags);
60322b39 11843 dc->cp_regs = cpu->cp_regs;
a984e42c 11844 dc->features = env->features;
40f860cd 11845
50225ad0
PM
11846 /* Single step state. The code-generation logic here is:
11847 * SS_ACTIVE == 0:
11848 * generate code with no special handling for single-stepping (except
11849 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11850 * this happens anyway because those changes are all system register or
11851 * PSTATE writes).
11852 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11853 * emit code for one insn
11854 * emit code to clear PSTATE.SS
11855 * emit code to generate software step exception for completed step
11856 * end TB (as usual for having generated an exception)
11857 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11858 * emit code to generate a software step exception
11859 * end the TB
11860 */
11861 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11862 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11863 dc->is_ldex = false;
11864 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11865
a7812ae4
PB
11866 cpu_F0s = tcg_temp_new_i32();
11867 cpu_F1s = tcg_temp_new_i32();
11868 cpu_F0d = tcg_temp_new_i64();
11869 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11870 cpu_V0 = cpu_F0d;
11871 cpu_V1 = cpu_F1d;
e677137d 11872 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11873 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 11874 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2e70f6ef
PB
11875 num_insns = 0;
11876 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 11877 if (max_insns == 0) {
2e70f6ef 11878 max_insns = CF_COUNT_MASK;
190ce7fb
RH
11879 }
11880 if (max_insns > TCG_MAX_INSNS) {
11881 max_insns = TCG_MAX_INSNS;
11882 }
2e70f6ef 11883
cd42d5b2 11884 gen_tb_start(tb);
e12ce78d 11885
3849902c
PM
11886 tcg_clear_temp_count();
11887
e12ce78d
PM
11888 /* A note on handling of the condexec (IT) bits:
11889 *
11890 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 11891 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 11892 * (1) if the condexec bits are not already zero then we write
0ecb72a5 11893 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
11894 * to do it at the end of the block. (For example if we don't do this
11895 * it's hard to identify whether we can safely skip writing condexec
11896 * at the end of the TB, which we definitely want to do for the case
11897 * where a TB doesn't do anything with the IT state at all.)
11898 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 11899 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
11900 * This is done both for leaving the TB at the end, and for leaving
11901 * it because of an exception we know will happen, which is done in
11902 * gen_exception_insn(). The latter is necessary because we need to
11903 * leave the TB with the PC/IT state just prior to execution of the
11904 * instruction which caused the exception.
11905 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 11906 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d 11907 * This is handled in the same way as restoration of the
4e5e1215
RH
11908 * PC in these situations; we save the value of the condexec bits
11909 * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
11910 * then uses this to restore them after an exception.
e12ce78d
PM
11911 *
11912 * Note that there are no instructions which can read the condexec
11913 * bits, and none which can write non-static values to them, so
0ecb72a5 11914 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
11915 * middle of a TB.
11916 */
11917
9ee6e8bb
PB
11918 /* Reset the conditional execution bits immediately. This avoids
11919 complications trying to do it at the end of the block. */
98eac7ca 11920 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 11921 {
39d5492a 11922 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 11923 tcg_gen_movi_i32(tmp, 0);
d9ba4830 11924 store_cpu_field(tmp, condexec_bits);
8f01245e 11925 }
2c0262af 11926 do {
9bb6558a 11927 dc->insn_start_idx = tcg_op_buf_count();
52e971d9 11928 tcg_gen_insn_start(dc->pc,
aaa1f954
EI
11929 (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
11930 0);
b933066a
RH
11931 num_insns++;
11932
fbb4a2e3
PB
11933#ifdef CONFIG_USER_ONLY
11934 /* Intercept jump to the magic kernel page. */
40f860cd 11935 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
11936 /* We always get here via a jump, so know we are not in a
11937 conditional execution block. */
d4a2dc67 11938 gen_exception_internal(EXCP_KERNEL_TRAP);
577bf808 11939 dc->is_jmp = DISAS_EXC;
fbb4a2e3
PB
11940 break;
11941 }
9ee6e8bb
PB
11942#endif
11943
f0c3c505 11944 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
b933066a 11945 CPUBreakpoint *bp;
f0c3c505 11946 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11947 if (bp->pc == dc->pc) {
5d98bf8f 11948 if (bp->flags & BP_CPU) {
ce8a1b54 11949 gen_set_condexec(dc);
ed6c6448 11950 gen_set_pc_im(dc, dc->pc);
5d98bf8f
SF
11951 gen_helper_check_breakpoints(cpu_env);
11952 /* End the TB early; it's likely not going to be executed */
11953 dc->is_jmp = DISAS_UPDATE;
11954 } else {
11955 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
522a0d4e
RH
11956 /* The address covered by the breakpoint must be
11957 included in [tb->pc, tb->pc + tb->size) in order
11958 to for it to be properly cleared -- thus we
11959 increment the PC here so that the logic setting
11960 tb->size below does the right thing. */
5d98bf8f
SF
11961 /* TODO: Advance PC by correct instruction length to
11962 * avoid disassembler error messages */
11963 dc->pc += 2;
11964 goto done_generating;
11965 }
11966 break;
1fddef4b
FB
11967 }
11968 }
11969 }
e50e6a20 11970
959082fc 11971 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 11972 gen_io_start();
959082fc 11973 }
2e70f6ef 11974
50225ad0
PM
11975 if (dc->ss_active && !dc->pstate_ss) {
11976 /* Singlestep state is Active-pending.
11977 * If we're in this state at the start of a TB then either
11978 * a) we just took an exception to an EL which is being debugged
11979 * and this is the first insn in the exception handler
11980 * b) debug exceptions were masked and we just unmasked them
11981 * without changing EL (eg by clearing PSTATE.D)
11982 * In either case we're going to take a swstep exception in the
11983 * "did not step an insn" case, and so the syndrome ISV and EX
11984 * bits should be zero.
11985 */
959082fc 11986 assert(num_insns == 1);
73710361
GB
11987 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
11988 default_exception_el(dc));
50225ad0
PM
11989 goto done_generating;
11990 }
11991
40f860cd 11992 if (dc->thumb) {
9ee6e8bb
PB
11993 disas_thumb_insn(env, dc);
11994 if (dc->condexec_mask) {
11995 dc->condexec_cond = (dc->condexec_cond & 0xe)
11996 | ((dc->condexec_mask >> 4) & 1);
11997 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11998 if (dc->condexec_mask == 0) {
11999 dc->condexec_cond = 0;
12000 }
12001 }
12002 } else {
f9fd40eb 12003 unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
f4df2210
PM
12004 dc->pc += 4;
12005 disas_arm_insn(dc, insn);
9ee6e8bb 12006 }
e50e6a20
FB
12007
12008 if (dc->condjmp && !dc->is_jmp) {
12009 gen_set_label(dc->condlabel);
12010 dc->condjmp = 0;
12011 }
3849902c
PM
12012
12013 if (tcg_check_temp_count()) {
0a2461fa
AG
12014 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
12015 dc->pc);
3849902c
PM
12016 }
12017
aaf2d97d 12018 /* Translation stops when a conditional branch is encountered.
e50e6a20 12019 * Otherwise the subsequent code could get translated several times.
b5ff1b31 12020 * Also stop translation when a page boundary is reached. This
bf20dc07 12021 * ensures prefetch aborts occur at the right place. */
541ebcd4
PM
12022
12023 /* We want to stop the TB if the next insn starts in a new page,
12024 * or if it spans between this page and the next. This means that
12025 * if we're looking at the last halfword in the page we need to
12026 * see if it's a 16-bit Thumb insn (which will fit in this TB)
12027 * or a 32-bit Thumb insn (which won't).
12028 * This is to avoid generating a silly TB with a single 16-bit insn
12029 * in it at the end of this page (which would execute correctly
12030 * but isn't very efficient).
12031 */
12032 end_of_page = (dc->pc >= next_page_start) ||
12033 ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
12034
fe700adb 12035 } while (!dc->is_jmp && !tcg_op_buf_full() &&
b636649f 12036 !is_singlestepping(dc) &&
1b530a6d 12037 !singlestep &&
541ebcd4 12038 !end_of_page &&
2e70f6ef
PB
12039 num_insns < max_insns);
12040
12041 if (tb->cflags & CF_LAST_IO) {
12042 if (dc->condjmp) {
12043 /* FIXME: This can theoretically happen with self-modifying
12044 code. */
a47dddd7 12045 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
12046 }
12047 gen_io_end();
12048 }
9ee6e8bb 12049
b5ff1b31 12050 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
12051 instruction was a conditional branch or trap, and the PC has
12052 already been written. */
f021b2c4 12053 gen_set_condexec(dc);
3bb8a96f
PM
12054 if (dc->is_jmp == DISAS_BX_EXCRET) {
12055 /* Exception return branches need some special case code at the
12056 * end of the TB, which is complex enough that it has to
12057 * handle the single-step vs not and the condition-failed
12058 * insn codepath itself.
12059 */
12060 gen_bx_excret_final_code(dc);
12061 } else if (unlikely(is_singlestepping(dc))) {
7999a5c8 12062 /* Unconditional and "condition passed" instruction codepath. */
7999a5c8
SF
12063 switch (dc->is_jmp) {
12064 case DISAS_SWI:
50225ad0 12065 gen_ss_advance(dc);
73710361
GB
12066 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12067 default_exception_el(dc));
7999a5c8
SF
12068 break;
12069 case DISAS_HVC:
37e6456e 12070 gen_ss_advance(dc);
73710361 12071 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
7999a5c8
SF
12072 break;
12073 case DISAS_SMC:
37e6456e 12074 gen_ss_advance(dc);
73710361 12075 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
7999a5c8
SF
12076 break;
12077 case DISAS_NEXT:
12078 case DISAS_UPDATE:
12079 gen_set_pc_im(dc, dc->pc);
12080 /* fall through */
12081 default:
5425415e
PM
12082 /* FIXME: Single stepping a WFI insn will not halt the CPU. */
12083 gen_singlestep_exception(dc);
7999a5c8 12084 }
8aaca4c0 12085 } else {
9ee6e8bb
PB
12086 /* While branches must always occur at the end of an IT block,
12087 there are a few other things that can cause us to terminate
65626741 12088 the TB in the middle of an IT block:
9ee6e8bb
PB
12089 - Exception generating instructions (bkpt, swi, undefined).
12090 - Page boundaries.
12091 - Hardware watchpoints.
12092 Hardware breakpoints have already been handled and skip this code.
12093 */
8aaca4c0 12094 switch(dc->is_jmp) {
8aaca4c0 12095 case DISAS_NEXT:
6e256c93 12096 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0 12097 break;
8aaca4c0 12098 case DISAS_UPDATE:
577bf808
SF
12099 gen_set_pc_im(dc, dc->pc);
12100 /* fall through */
12101 case DISAS_JUMP:
8a6b28c7
EC
12102 gen_goto_ptr();
12103 break;
577bf808 12104 default:
8aaca4c0 12105 /* indicate that the hash table must be used to find the next TB */
57fec1fe 12106 tcg_gen_exit_tb(0);
8aaca4c0
FB
12107 break;
12108 case DISAS_TB_JUMP:
8a6b28c7 12109 case DISAS_EXC:
8aaca4c0
FB
12110 /* nothing more to generate */
12111 break;
9ee6e8bb 12112 case DISAS_WFI:
1ce94f81 12113 gen_helper_wfi(cpu_env);
84549b6d
PM
12114 /* The helper doesn't necessarily throw an exception, but we
12115 * must go back to the main loop to check for interrupts anyway.
12116 */
12117 tcg_gen_exit_tb(0);
9ee6e8bb 12118 break;
72c1d3af
PM
12119 case DISAS_WFE:
12120 gen_helper_wfe(cpu_env);
12121 break;
c87e5a61
PM
12122 case DISAS_YIELD:
12123 gen_helper_yield(cpu_env);
12124 break;
9ee6e8bb 12125 case DISAS_SWI:
73710361
GB
12126 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
12127 default_exception_el(dc));
9ee6e8bb 12128 break;
37e6456e 12129 case DISAS_HVC:
73710361 12130 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
37e6456e
PM
12131 break;
12132 case DISAS_SMC:
73710361 12133 gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
37e6456e 12134 break;
8aaca4c0 12135 }
f021b2c4
PM
12136 }
12137
12138 if (dc->condjmp) {
12139 /* "Condition failed" instruction codepath for the branch/trap insn */
12140 gen_set_label(dc->condlabel);
12141 gen_set_condexec(dc);
b636649f 12142 if (unlikely(is_singlestepping(dc))) {
f021b2c4
PM
12143 gen_set_pc_im(dc, dc->pc);
12144 gen_singlestep_exception(dc);
12145 } else {
6e256c93 12146 gen_goto_tb(dc, 1, dc->pc);
e50e6a20 12147 }
2c0262af 12148 }
2e70f6ef 12149
9ee6e8bb 12150done_generating:
806f352d 12151 gen_tb_end(tb, num_insns);
2c0262af
FB
12152
12153#ifdef DEBUG_DISAS
06486077
AB
12154 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
12155 qemu_log_in_addr_range(pc_start)) {
1ee73216 12156 qemu_log_lock();
93fcfe39
AL
12157 qemu_log("----------------\n");
12158 qemu_log("IN: %s\n", lookup_symbol(pc_start));
d49190c4 12159 log_target_disas(cs, pc_start, dc->pc - pc_start,
f9fd40eb 12160 dc->thumb | (dc->sctlr_b << 1));
93fcfe39 12161 qemu_log("\n");
1ee73216 12162 qemu_log_unlock();
2c0262af
FB
12163 }
12164#endif
4e5e1215
RH
12165 tb->size = dc->pc - pc_start;
12166 tb->icount = num_insns;
2c0262af
FB
12167}
12168
/* Human-readable names for the 16 ARM processor mode encodings
 * (CPSR.M[3:0]), indexed by the raw 4-bit mode value.
 * "???" marks encodings that are reserved/unpredictable.
 *
 * The pointers themselves are const-qualified as well, so the whole
 * table lives in read-only storage and cannot be accidentally
 * repointed at runtime.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 12173
878096ee
AF
12174void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12175 int flags)
2c0262af 12176{
878096ee
AF
12177 ARMCPU *cpu = ARM_CPU(cs);
12178 CPUARMState *env = &cpu->env;
2c0262af 12179 int i;
b5ff1b31 12180 uint32_t psr;
06e5cf7a 12181 const char *ns_status;
2c0262af 12182
17731115
PM
12183 if (is_a64(env)) {
12184 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12185 return;
12186 }
12187
2c0262af 12188 for(i=0;i<16;i++) {
7fe48483 12189 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12190 if ((i % 4) == 3)
7fe48483 12191 cpu_fprintf(f, "\n");
2c0262af 12192 else
7fe48483 12193 cpu_fprintf(f, " ");
2c0262af 12194 }
b5ff1b31 12195 psr = cpsr_read(env);
06e5cf7a
PM
12196
12197 if (arm_feature(env, ARM_FEATURE_EL3) &&
12198 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12199 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12200 } else {
12201 ns_status = "";
12202 }
12203
12204 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 12205 psr,
b5ff1b31
FB
12206 psr & (1 << 31) ? 'N' : '-',
12207 psr & (1 << 30) ? 'Z' : '-',
12208 psr & (1 << 29) ? 'C' : '-',
12209 psr & (1 << 28) ? 'V' : '-',
5fafdf24 12210 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 12211 ns_status,
b5ff1b31 12212 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 12213
f2617cfc
PM
12214 if (flags & CPU_DUMP_FPU) {
12215 int numvfpregs = 0;
12216 if (arm_feature(env, ARM_FEATURE_VFP)) {
12217 numvfpregs += 16;
12218 }
12219 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12220 numvfpregs += 16;
12221 }
12222 for (i = 0; i < numvfpregs; i++) {
12223 uint64_t v = float64_val(env->vfp.regs[i]);
12224 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12225 i * 2, (uint32_t)v,
12226 i * 2 + 1, (uint32_t)(v >> 32),
12227 i, v);
12228 }
12229 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12230 }
2c0262af 12231}
a6b025d3 12232
bad729e2
RH
12233void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12234 target_ulong *data)
d2856f1a 12235{
3926cc84 12236 if (is_a64(env)) {
bad729e2 12237 env->pc = data[0];
40f860cd 12238 env->condexec_bits = 0;
aaa1f954 12239 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12240 } else {
bad729e2
RH
12241 env->regs[15] = data[0];
12242 env->condexec_bits = data[1];
aaa1f954 12243 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12244 }
d2856f1a 12245}