]> git.proxmox.com Git - mirror_qemu.git/blame - target/arm/translate.c
armv7m: Check exception return consistency
[mirror_qemu.git] / target / arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af 20 */
74c21bd0 21#include "qemu/osdep.h"
2c0262af
FB
22
23#include "cpu.h"
ccd38087 24#include "internals.h"
76cad711 25#include "disas/disas.h"
63c91552 26#include "exec/exec-all.h"
57fec1fe 27#include "tcg-op.h"
1de7afc9 28#include "qemu/log.h"
534df156 29#include "qemu/bitops.h"
1d854765 30#include "arm_ldst.h"
19a6e31c 31#include "exec/semihost.h"
1497c961 32
2ef6175a
RH
33#include "exec/helper-proto.h"
34#include "exec/helper-gen.h"
2c0262af 35
a7e30d84 36#include "trace-tcg.h"
508127e2 37#include "exec/log.h"
a7e30d84
LV
38
39
/* Convenience tests for the architecture features of the CPU being
 * translated.  These require a DisasContext named "s" in scope.
 */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Jump to the translator's local illegal_op label if the required
 * architecture feature is absent; do/while(0) makes it statement-safe.
 */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif
60
TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* TCG globals mapping the 16 AArch32 core registers (cpu_R[15] is the PC) */
static TCGv_i32 cpu_R[16];
/* TCG globals for the individual condition flags (kept unpacked) */
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
/* Monitor state for exclusive (LDREX/STREX) accesses */
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

/* FIXME: These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
78
b26eefb6
PB
/* initialize TCG globals.  Called once at startup; creates the TCG
 * global variables backing the CPUARMState fields used above, then
 * chains to the AArch64 translator's own init.
 */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}
104
/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, TCGMemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;      /* syndrome access size field */
    bool sse = memop & MO_SIGN;     /* sign-extension bit */
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}
147
static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
{
    /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_S1E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
        return ARMMMUIdx_S12NSE0;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
        return ARMMMUIdx_S1SE0;
    case ARMMMUIdx_S2NS:        /* stage-2 index never valid here */
    default:
        g_assert_not_reached();
    }
}
169
/* Return a new temp holding the 32-bit CPUARMState field at byte OFFSET.
 * Caller owns (and must free) the returned temp.
 */
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

/* Store VAR to the 32-bit CPUARMState field at byte OFFSET.
 * VAR is marked as dead (freed here).
 */
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 187
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}
211
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  Writing r15 forces the translator to end the TB. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}
228
/* Value extensions.  In-place zero/sign extension of byte/halfword. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual 8->16-bit extensions (both halfwords at once), via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 237
b26eefb6 238
/* Write VAR into the CPSR, updating only the bits selected by MASK;
 * goes via the cpsr_write helper so mode changes etc are handled.
 */
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
247
/* Raise a QEMU-internal exception (not an architectural one). */
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

/* Raise an architectural exception EXCP with the given syndrome,
 * targeting exception level TARGET_EL.
 */
static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
270
static void gen_ss_advance(DisasContext *s)
{
    /* If the singlestep state is Active-not-pending, advance to
     * Active-pending.
     */
    if (s->ss_active) {
        s->pstate_ss = 0;
        gen_helper_clear_pstate_ss(cpu_env);
    }
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
                  default_exception_el(s));
    s->is_jmp = DISAS_EXC;
}
298
/* Dual 16x16->32 signed multiply: a = low(a)*low(b), b = high(a)*high(b).
 * Both operands are updated in place.
 */
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}
313
/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}
333
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}
361
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    /* Unsigned widening multiply into a lo/hi pair, then concatenate. */
    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Signed 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}
398
/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */
static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* Masking out bit 15 prevents carry between the two halfwords;
     * the xor re-inserts the correct top bit of each half-sum. */
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}
428
/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  (NF holds the value itself; ZF is
 * "non-zero means Z clear", so a plain copy suffices for both.) */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}
441
/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}
463
/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* add2 gives us the carry-out directly in cpu_CF */
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* V = (result ^ t0) & ~(t0 ^ t1): overflow iff operands had the
     * same sign and the result's sign differs. */
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        /* Two chained add2 ops: first add the carry-in, then t1. */
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        /* Fallback: do the sum in 64 bits and split out the carry. */
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    /* ARM subtraction carry = NOT borrow, i.e. t0 >= t1 unsigned. */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}
529
/* Variable shift by register: ARM shifts use the low 8 bits of the
 * shift register, and shifts of 32 or more produce 0 (for LSL/LSR).
 * The movcond selects 0 as the source when the count exceeds 31.
 */
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)       \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

/* ASR by register: counts >= 32 are clamped to 31, which replicates
 * the sign bit as the architecture requires. */
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}
560
/* dest = |src| via movcond: select src when src > 0, else -src. */
static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}
ad69471c 570
/* Set CF to bit SHIFT of VAR (the "shifter carry out" of an
 * immediate shift); shift == 0 means bit 0.
 */
static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}
b26eefb6 582
/* Shift by immediate.  Includes special handling for shift == 0.
 * shiftop encodes LSL/LSR/ASR/ROR; if FLAGS is set the shifter
 * carry-out is written to CF.
 */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* Encoded shift 0 means LSR #32: result 0, CF = bit 31 */
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;     /* encoded 0 means ASR #32 */
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;     /* sari by 31 replicates the sign bit */
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            /* RRX: rotate right by one through the carry flag */
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};
632
/* Shift VAR by a register amount.  SHIFT is marked as dead.  The
 * flag-setting forms go via helpers which also update CF.
 */
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}
660
/* Dispatch table for the ARM parallel add/sub (SIMD) helper calls,
 * indexed by the op2 field; op1 selects the saturation/halving family.
 */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        /* signed/unsigned basic forms take a pointer to the GE flags */
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
706
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.
 * Same helpers as gen_arm_parallel_addsub, but op1/op2 roles swapped.
 */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
753
/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;   /* false when value is a fresh temp we own */

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions.  */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0.  */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF).  Propagate
         * the sign bit then AND with ZF to yield the result.  */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value.  */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    /* Odd condition codes are the inverse of the preceding even one. */
    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}
843
/* Release any temp that arm_test_cc allocated (globals are not freed). */
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

/* Branch to LABEL if the prepared comparison holds. */
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

/* Convenience: test condition code CC and branch to LABEL if it holds. */
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}
2c0262af 863
/* For each of the 16 data-processing opcodes: 1 if it is a logical op
 * (flags set via gen_logic_CC), 0 if arithmetic (flags set by the op).
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 882
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_JUMP;
    /* Only touch env->thumb when the interworking bit differs. */
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_JUMP;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}
906
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
931
#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead.  */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        /* System-mode BE32: fold the byte-lane swap into the address */
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
957
/* 32-bit guest load with AArch32 address handling. */
static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

/* 32-bit guest store with AArch32 address handling. */
static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, TCGMemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}
08307563 973
/* Emit a sized load helper, plus an _iss variant that additionally
 * records instruction syndrome information for the access.
 */
#define DO_GEN_LD(SUFF, OPC)                                             \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_ld##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_ld##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo);                                   \
}
988
/* Emit a sized store helper, plus an _iss variant that additionally
 * records instruction syndrome information (marked as a write).
 */
#define DO_GEN_ST(SUFF, OPC)                                             \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val,      \
                                     TCGv_i32 a32, int index)            \
{                                                                        \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data);               \
}                                                                        \
static inline void gen_aa32_st##SUFF##_iss(DisasContext *s,              \
                                           TCGv_i32 val,                 \
                                           TCGv_i32 a32, int index,      \
                                           ISSInfo issinfo)              \
{                                                                        \
    gen_aa32_st##SUFF(s, val, a32, index);                               \
    disas_set_da_iss(s, OPC, issinfo | ISSIsWrite);                      \
}
1003
7f5616f5 1004static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
08307563 1005{
e334bd31
PB
1006 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1007 if (!IS_USER_ONLY && s->sctlr_b) {
1008 tcg_gen_rotri_i64(val, val, 32);
1009 }
08307563
PM
1010}
1011
7f5616f5
RH
1012static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1013 int index, TCGMemOp opc)
08307563 1014{
7f5616f5
RH
1015 TCGv addr = gen_aa32_addr(s, a32, opc);
1016 tcg_gen_qemu_ld_i64(val, addr, index, opc);
1017 gen_aa32_frob64(s, val);
1018 tcg_temp_free(addr);
1019}
1020
1021static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
1022 TCGv_i32 a32, int index)
1023{
1024 gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
1025}
1026
1027static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
1028 int index, TCGMemOp opc)
1029{
1030 TCGv addr = gen_aa32_addr(s, a32, opc);
e334bd31
PB
1031
1032 /* Not needed for user-mode BE32, where we use MO_BE instead. */
1033 if (!IS_USER_ONLY && s->sctlr_b) {
7f5616f5 1034 TCGv_i64 tmp = tcg_temp_new_i64();
e334bd31 1035 tcg_gen_rotri_i64(tmp, val, 32);
7f5616f5
RH
1036 tcg_gen_qemu_st_i64(tmp, addr, index, opc);
1037 tcg_temp_free_i64(tmp);
e334bd31 1038 } else {
7f5616f5 1039 tcg_gen_qemu_st_i64(val, addr, index, opc);
e334bd31 1040 }
7f5616f5 1041 tcg_temp_free(addr);
08307563
PM
1042}
1043
7f5616f5
RH
1044static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
1045 TCGv_i32 a32, int index)
1046{
1047 gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
1048}
08307563 1049
7f5616f5
RH
1050DO_GEN_LD(8s, MO_SB)
1051DO_GEN_LD(8u, MO_UB)
1052DO_GEN_LD(16s, MO_SW)
1053DO_GEN_LD(16u, MO_UW)
1054DO_GEN_LD(32u, MO_UL)
7f5616f5
RH
1055DO_GEN_ST(8, MO_UB)
1056DO_GEN_ST(16, MO_UW)
1057DO_GEN_ST(32, MO_UL)
08307563 1058
eaed129d 1059static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 1060{
40f860cd 1061 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
1062}
1063
37e6456e
PM
1064static inline void gen_hvc(DisasContext *s, int imm16)
1065{
1066 /* The pre HVC helper handles cases when HVC gets trapped
1067 * as an undefined insn by runtime configuration (ie before
1068 * the insn really executes).
1069 */
1070 gen_set_pc_im(s, s->pc - 4);
1071 gen_helper_pre_hvc(cpu_env);
1072 /* Otherwise we will treat this as a real exception which
1073 * happens after execution of the insn. (The distinction matters
1074 * for the PC value reported to the exception handler and also
1075 * for single stepping.)
1076 */
1077 s->svc_imm = imm16;
1078 gen_set_pc_im(s, s->pc);
1079 s->is_jmp = DISAS_HVC;
1080}
1081
1082static inline void gen_smc(DisasContext *s)
1083{
1084 /* As with HVC, we may take an exception either before or after
1085 * the insn executes.
1086 */
1087 TCGv_i32 tmp;
1088
1089 gen_set_pc_im(s, s->pc - 4);
1090 tmp = tcg_const_i32(syn_aa32_smc());
1091 gen_helper_pre_smc(cpu_env, tmp);
1092 tcg_temp_free_i32(tmp);
1093 gen_set_pc_im(s, s->pc);
1094 s->is_jmp = DISAS_SMC;
1095}
1096
d4a2dc67
PM
1097static inline void
1098gen_set_condexec (DisasContext *s)
1099{
1100 if (s->condexec_mask) {
1101 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1102 TCGv_i32 tmp = tcg_temp_new_i32();
1103 tcg_gen_movi_i32(tmp, val);
1104 store_cpu_field(tmp, condexec_bits);
1105 }
1106}
1107
1108static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1109{
1110 gen_set_condexec(s);
1111 gen_set_pc_im(s, s->pc - offset);
1112 gen_exception_internal(excp);
1113 s->is_jmp = DISAS_JUMP;
1114}
1115
73710361
GB
1116static void gen_exception_insn(DisasContext *s, int offset, int excp,
1117 int syn, uint32_t target_el)
d4a2dc67
PM
1118{
1119 gen_set_condexec(s);
1120 gen_set_pc_im(s, s->pc - offset);
73710361 1121 gen_exception(excp, syn, target_el);
d4a2dc67
PM
1122 s->is_jmp = DISAS_JUMP;
1123}
1124
b5ff1b31
FB
1125/* Force a TB lookup after an instruction that changes the CPU state. */
1126static inline void gen_lookup_tb(DisasContext *s)
1127{
a6445c52 1128 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
577bf808 1129 s->is_jmp = DISAS_JUMP;
b5ff1b31
FB
1130}
1131
19a6e31c
PM
1132static inline void gen_hlt(DisasContext *s, int imm)
1133{
1134 /* HLT. This has two purposes.
1135 * Architecturally, it is an external halting debug instruction.
1136 * Since QEMU doesn't implement external debug, we treat this as
1137 * it is required for halting debug disabled: it will UNDEF.
1138 * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
1139 * and "HLT 0xF000" is an A32 semihosting syscall. These traps
1140 * must trigger semihosting even for ARMv7 and earlier, where
1141 * HLT was an undefined encoding.
1142 * In system mode, we don't allow userspace access to
1143 * semihosting, to provide some semblance of security
1144 * (and for consistency with our 32-bit semihosting).
1145 */
1146 if (semihosting_enabled() &&
1147#ifndef CONFIG_USER_ONLY
1148 s->current_el != 0 &&
1149#endif
1150 (imm == (s->thumb ? 0x3c : 0xf000))) {
1151 gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
1152 return;
1153 }
1154
1155 gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
1156 default_exception_el(s));
1157}
1158
b0109805 1159static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1160 TCGv_i32 var)
2c0262af 1161{
1e8d4eec 1162 int val, rm, shift, shiftop;
39d5492a 1163 TCGv_i32 offset;
2c0262af
FB
1164
1165 if (!(insn & (1 << 25))) {
1166 /* immediate */
1167 val = insn & 0xfff;
1168 if (!(insn & (1 << 23)))
1169 val = -val;
537730b9 1170 if (val != 0)
b0109805 1171 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1172 } else {
1173 /* shift/register */
1174 rm = (insn) & 0xf;
1175 shift = (insn >> 7) & 0x1f;
1e8d4eec 1176 shiftop = (insn >> 5) & 3;
b26eefb6 1177 offset = load_reg(s, rm);
9a119ff6 1178 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1179 if (!(insn & (1 << 23)))
b0109805 1180 tcg_gen_sub_i32(var, var, offset);
2c0262af 1181 else
b0109805 1182 tcg_gen_add_i32(var, var, offset);
7d1b0095 1183 tcg_temp_free_i32(offset);
2c0262af
FB
1184 }
1185}
1186
191f9a93 1187static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1188 int extra, TCGv_i32 var)
2c0262af
FB
1189{
1190 int val, rm;
39d5492a 1191 TCGv_i32 offset;
3b46e624 1192
2c0262af
FB
1193 if (insn & (1 << 22)) {
1194 /* immediate */
1195 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1196 if (!(insn & (1 << 23)))
1197 val = -val;
18acad92 1198 val += extra;
537730b9 1199 if (val != 0)
b0109805 1200 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1201 } else {
1202 /* register */
191f9a93 1203 if (extra)
b0109805 1204 tcg_gen_addi_i32(var, var, extra);
2c0262af 1205 rm = (insn) & 0xf;
b26eefb6 1206 offset = load_reg(s, rm);
2c0262af 1207 if (!(insn & (1 << 23)))
b0109805 1208 tcg_gen_sub_i32(var, var, offset);
2c0262af 1209 else
b0109805 1210 tcg_gen_add_i32(var, var, offset);
7d1b0095 1211 tcg_temp_free_i32(offset);
2c0262af
FB
1212 }
1213}
1214
5aaebd13
PM
1215static TCGv_ptr get_fpstatus_ptr(int neon)
1216{
1217 TCGv_ptr statusptr = tcg_temp_new_ptr();
1218 int offset;
1219 if (neon) {
0ecb72a5 1220 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1221 } else {
0ecb72a5 1222 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1223 }
1224 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1225 return statusptr;
1226}
1227
4373f3ce
PB
1228#define VFP_OP2(name) \
1229static inline void gen_vfp_##name(int dp) \
1230{ \
ae1857ec
PM
1231 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1232 if (dp) { \
1233 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1234 } else { \
1235 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1236 } \
1237 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1238}
1239
4373f3ce
PB
1240VFP_OP2(add)
1241VFP_OP2(sub)
1242VFP_OP2(mul)
1243VFP_OP2(div)
1244
1245#undef VFP_OP2
1246
605a6aed
PM
1247static inline void gen_vfp_F1_mul(int dp)
1248{
1249 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1250 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1251 if (dp) {
ae1857ec 1252 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1253 } else {
ae1857ec 1254 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1255 }
ae1857ec 1256 tcg_temp_free_ptr(fpst);
605a6aed
PM
1257}
1258
1259static inline void gen_vfp_F1_neg(int dp)
1260{
1261 /* Like gen_vfp_neg() but put result in F1 */
1262 if (dp) {
1263 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1264 } else {
1265 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1266 }
1267}
1268
4373f3ce
PB
1269static inline void gen_vfp_abs(int dp)
1270{
1271 if (dp)
1272 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1273 else
1274 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1275}
1276
1277static inline void gen_vfp_neg(int dp)
1278{
1279 if (dp)
1280 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1281 else
1282 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1283}
1284
1285static inline void gen_vfp_sqrt(int dp)
1286{
1287 if (dp)
1288 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1289 else
1290 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1291}
1292
1293static inline void gen_vfp_cmp(int dp)
1294{
1295 if (dp)
1296 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1297 else
1298 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1299}
1300
1301static inline void gen_vfp_cmpe(int dp)
1302{
1303 if (dp)
1304 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1305 else
1306 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1307}
1308
1309static inline void gen_vfp_F1_ld0(int dp)
1310{
1311 if (dp)
5b340b51 1312 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1313 else
5b340b51 1314 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1315}
1316
5500b06c
PM
1317#define VFP_GEN_ITOF(name) \
1318static inline void gen_vfp_##name(int dp, int neon) \
1319{ \
5aaebd13 1320 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1321 if (dp) { \
1322 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1323 } else { \
1324 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1325 } \
b7fa9214 1326 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1327}
1328
5500b06c
PM
1329VFP_GEN_ITOF(uito)
1330VFP_GEN_ITOF(sito)
1331#undef VFP_GEN_ITOF
4373f3ce 1332
5500b06c
PM
1333#define VFP_GEN_FTOI(name) \
1334static inline void gen_vfp_##name(int dp, int neon) \
1335{ \
5aaebd13 1336 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1337 if (dp) { \
1338 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1339 } else { \
1340 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1341 } \
b7fa9214 1342 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1343}
1344
5500b06c
PM
1345VFP_GEN_FTOI(toui)
1346VFP_GEN_FTOI(touiz)
1347VFP_GEN_FTOI(tosi)
1348VFP_GEN_FTOI(tosiz)
1349#undef VFP_GEN_FTOI
4373f3ce 1350
16d5b3ca 1351#define VFP_GEN_FIX(name, round) \
5500b06c 1352static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1353{ \
39d5492a 1354 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1355 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1356 if (dp) { \
16d5b3ca
WN
1357 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1358 statusptr); \
5500b06c 1359 } else { \
16d5b3ca
WN
1360 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1361 statusptr); \
5500b06c 1362 } \
b75263d6 1363 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1364 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1365}
16d5b3ca
WN
1366VFP_GEN_FIX(tosh, _round_to_zero)
1367VFP_GEN_FIX(tosl, _round_to_zero)
1368VFP_GEN_FIX(touh, _round_to_zero)
1369VFP_GEN_FIX(toul, _round_to_zero)
1370VFP_GEN_FIX(shto, )
1371VFP_GEN_FIX(slto, )
1372VFP_GEN_FIX(uhto, )
1373VFP_GEN_FIX(ulto, )
4373f3ce 1374#undef VFP_GEN_FIX
9ee6e8bb 1375
39d5492a 1376static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1377{
08307563 1378 if (dp) {
12dcc321 1379 gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1380 } else {
12dcc321 1381 gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
08307563 1382 }
b5ff1b31
FB
1383}
1384
39d5492a 1385static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1386{
08307563 1387 if (dp) {
12dcc321 1388 gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
08307563 1389 } else {
12dcc321 1390 gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
08307563 1391 }
b5ff1b31
FB
1392}
1393
8e96005d
FB
1394static inline long
1395vfp_reg_offset (int dp, int reg)
1396{
1397 if (dp)
1398 return offsetof(CPUARMState, vfp.regs[reg]);
1399 else if (reg & 1) {
1400 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1401 + offsetof(CPU_DoubleU, l.upper);
1402 } else {
1403 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1404 + offsetof(CPU_DoubleU, l.lower);
1405 }
1406}
9ee6e8bb
PB
1407
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each NEON D register is two single-precision slots. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1417
39d5492a 1418static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1419{
39d5492a 1420 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1421 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1422 return tmp;
1423}
1424
39d5492a 1425static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1426{
1427 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1428 tcg_temp_free_i32(var);
8f8e3aa4
PB
1429}
1430
a7812ae4 1431static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1432{
1433 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1434}
1435
a7812ae4 1436static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1437{
1438 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1439}
1440
4373f3ce
PB
1441#define tcg_gen_ld_f32 tcg_gen_ld_i32
1442#define tcg_gen_ld_f64 tcg_gen_ld_i64
1443#define tcg_gen_st_f32 tcg_gen_st_i32
1444#define tcg_gen_st_f64 tcg_gen_st_i64
1445
b7bcbe95
FB
1446static inline void gen_mov_F0_vreg(int dp, int reg)
1447{
1448 if (dp)
4373f3ce 1449 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1450 else
4373f3ce 1451 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1452}
1453
1454static inline void gen_mov_F1_vreg(int dp, int reg)
1455{
1456 if (dp)
4373f3ce 1457 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1458 else
4373f3ce 1459 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1460}
1461
1462static inline void gen_mov_vreg_F0(int dp, int reg)
1463{
1464 if (dp)
4373f3ce 1465 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1466 else
4373f3ce 1467 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1468}
1469
18c9b560
AZ
1470#define ARM_CP_RW_BIT (1 << 20)
1471
a7812ae4 1472static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1473{
0ecb72a5 1474 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1475}
1476
a7812ae4 1477static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1478{
0ecb72a5 1479 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1480}
1481
39d5492a 1482static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1483{
39d5492a 1484 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1485 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1486 return var;
e677137d
PB
1487}
1488
39d5492a 1489static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1490{
0ecb72a5 1491 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1492 tcg_temp_free_i32(var);
e677137d
PB
1493}
1494
1495static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1496{
1497 iwmmxt_store_reg(cpu_M0, rn);
1498}
1499
1500static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1501{
1502 iwmmxt_load_reg(cpu_M0, rn);
1503}
1504
1505static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1506{
1507 iwmmxt_load_reg(cpu_V1, rn);
1508 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1509}
1510
1511static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1512{
1513 iwmmxt_load_reg(cpu_V1, rn);
1514 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1515}
1516
1517static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1518{
1519 iwmmxt_load_reg(cpu_V1, rn);
1520 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1521}
1522
/* Generators for iwMMXt binary helpers, M0 := helper(M0, wRn).  The
 * _ENV variants also pass cpu_env so the helper can touch CPU state.
 */
#define IWMMXT_OP(name)                                                 \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)                \
{                                                                       \
    iwmmxt_load_reg(cpu_V1, rn);                                        \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);                   \
}

#define IWMMXT_OP_ENV(name)                                             \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)                \
{                                                                       \
    iwmmxt_load_reg(cpu_V1, rn);                                        \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);          \
}

/* Instantiate the byte/word/long variants of an _ENV op in one go. */
#define IWMMXT_OP_ENV_SIZE(name)                                        \
IWMMXT_OP_ENV(name##b)                                                  \
IWMMXT_OP_ENV(name##w)                                                  \
IWMMXT_OP_ENV(name##l)

/* Unary op on M0, with env access. */
#define IWMMXT_OP_ENV1(name)                                            \
static inline void gen_op_iwmmxt_##name##_M0(void)                      \
{                                                                       \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);                  \
}
1547
1548IWMMXT_OP(maddsq)
1549IWMMXT_OP(madduq)
1550IWMMXT_OP(sadb)
1551IWMMXT_OP(sadw)
1552IWMMXT_OP(mulslw)
1553IWMMXT_OP(mulshw)
1554IWMMXT_OP(mululw)
1555IWMMXT_OP(muluhw)
1556IWMMXT_OP(macsw)
1557IWMMXT_OP(macuw)
1558
477955bd
PM
1559IWMMXT_OP_ENV_SIZE(unpackl)
1560IWMMXT_OP_ENV_SIZE(unpackh)
1561
1562IWMMXT_OP_ENV1(unpacklub)
1563IWMMXT_OP_ENV1(unpackluw)
1564IWMMXT_OP_ENV1(unpacklul)
1565IWMMXT_OP_ENV1(unpackhub)
1566IWMMXT_OP_ENV1(unpackhuw)
1567IWMMXT_OP_ENV1(unpackhul)
1568IWMMXT_OP_ENV1(unpacklsb)
1569IWMMXT_OP_ENV1(unpacklsw)
1570IWMMXT_OP_ENV1(unpacklsl)
1571IWMMXT_OP_ENV1(unpackhsb)
1572IWMMXT_OP_ENV1(unpackhsw)
1573IWMMXT_OP_ENV1(unpackhsl)
1574
1575IWMMXT_OP_ENV_SIZE(cmpeq)
1576IWMMXT_OP_ENV_SIZE(cmpgtu)
1577IWMMXT_OP_ENV_SIZE(cmpgts)
1578
1579IWMMXT_OP_ENV_SIZE(mins)
1580IWMMXT_OP_ENV_SIZE(minu)
1581IWMMXT_OP_ENV_SIZE(maxs)
1582IWMMXT_OP_ENV_SIZE(maxu)
1583
1584IWMMXT_OP_ENV_SIZE(subn)
1585IWMMXT_OP_ENV_SIZE(addn)
1586IWMMXT_OP_ENV_SIZE(subu)
1587IWMMXT_OP_ENV_SIZE(addu)
1588IWMMXT_OP_ENV_SIZE(subs)
1589IWMMXT_OP_ENV_SIZE(adds)
1590
1591IWMMXT_OP_ENV(avgb0)
1592IWMMXT_OP_ENV(avgb1)
1593IWMMXT_OP_ENV(avgw0)
1594IWMMXT_OP_ENV(avgw1)
e677137d 1595
477955bd
PM
1596IWMMXT_OP_ENV(packuw)
1597IWMMXT_OP_ENV(packul)
1598IWMMXT_OP_ENV(packuq)
1599IWMMXT_OP_ENV(packsw)
1600IWMMXT_OP_ENV(packsl)
1601IWMMXT_OP_ENV(packsq)
e677137d 1602
e677137d
PB
1603static void gen_op_iwmmxt_set_mup(void)
1604{
39d5492a 1605 TCGv_i32 tmp;
e677137d
PB
1606 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1607 tcg_gen_ori_i32(tmp, tmp, 2);
1608 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1609}
1610
1611static void gen_op_iwmmxt_set_cup(void)
1612{
39d5492a 1613 TCGv_i32 tmp;
e677137d
PB
1614 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1615 tcg_gen_ori_i32(tmp, tmp, 1);
1616 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1617}
1618
1619static void gen_op_iwmmxt_setpsr_nz(void)
1620{
39d5492a 1621 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1622 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1623 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1624}
1625
1626static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1627{
1628 iwmmxt_load_reg(cpu_V1, rn);
86831435 1629 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1630 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1631}
1632
39d5492a
PM
1633static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1634 TCGv_i32 dest)
18c9b560
AZ
1635{
1636 int rd;
1637 uint32_t offset;
39d5492a 1638 TCGv_i32 tmp;
18c9b560
AZ
1639
1640 rd = (insn >> 16) & 0xf;
da6b5335 1641 tmp = load_reg(s, rd);
18c9b560
AZ
1642
1643 offset = (insn & 0xff) << ((insn >> 7) & 2);
1644 if (insn & (1 << 24)) {
1645 /* Pre indexed */
1646 if (insn & (1 << 23))
da6b5335 1647 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1648 else
da6b5335
FN
1649 tcg_gen_addi_i32(tmp, tmp, -offset);
1650 tcg_gen_mov_i32(dest, tmp);
18c9b560 1651 if (insn & (1 << 21))
da6b5335
FN
1652 store_reg(s, rd, tmp);
1653 else
7d1b0095 1654 tcg_temp_free_i32(tmp);
18c9b560
AZ
1655 } else if (insn & (1 << 21)) {
1656 /* Post indexed */
da6b5335 1657 tcg_gen_mov_i32(dest, tmp);
18c9b560 1658 if (insn & (1 << 23))
da6b5335 1659 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1660 else
da6b5335
FN
1661 tcg_gen_addi_i32(tmp, tmp, -offset);
1662 store_reg(s, rd, tmp);
18c9b560
AZ
1663 } else if (!(insn & (1 << 23)))
1664 return 1;
1665 return 0;
1666}
1667
39d5492a 1668static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1669{
1670 int rd = (insn >> 0) & 0xf;
39d5492a 1671 TCGv_i32 tmp;
18c9b560 1672
da6b5335
FN
1673 if (insn & (1 << 8)) {
1674 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1675 return 1;
da6b5335
FN
1676 } else {
1677 tmp = iwmmxt_load_creg(rd);
1678 }
1679 } else {
7d1b0095 1680 tmp = tcg_temp_new_i32();
da6b5335 1681 iwmmxt_load_reg(cpu_V0, rd);
ecc7b3aa 1682 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
da6b5335
FN
1683 }
1684 tcg_gen_andi_i32(tmp, tmp, mask);
1685 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1686 tcg_temp_free_i32(tmp);
18c9b560
AZ
1687 return 0;
1688}
1689
a1c7273b 1690/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1691 (ie. an undefined instruction). */
7dcc1f89 1692static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1693{
1694 int rd, wrd;
1695 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1696 TCGv_i32 addr;
1697 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1698
1699 if ((insn & 0x0e000e00) == 0x0c000000) {
1700 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1701 wrd = insn & 0xf;
1702 rdlo = (insn >> 12) & 0xf;
1703 rdhi = (insn >> 16) & 0xf;
1704 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335 1705 iwmmxt_load_reg(cpu_V0, wrd);
ecc7b3aa 1706 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
da6b5335 1707 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 1708 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1709 } else { /* TMCRR */
da6b5335
FN
1710 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1711 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1712 gen_op_iwmmxt_set_mup();
1713 }
1714 return 0;
1715 }
1716
1717 wrd = (insn >> 12) & 0xf;
7d1b0095 1718 addr = tcg_temp_new_i32();
da6b5335 1719 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1720 tcg_temp_free_i32(addr);
18c9b560 1721 return 1;
da6b5335 1722 }
18c9b560
AZ
1723 if (insn & ARM_CP_RW_BIT) {
1724 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1725 tmp = tcg_temp_new_i32();
12dcc321 1726 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
da6b5335 1727 iwmmxt_store_creg(wrd, tmp);
18c9b560 1728 } else {
e677137d
PB
1729 i = 1;
1730 if (insn & (1 << 8)) {
1731 if (insn & (1 << 22)) { /* WLDRD */
12dcc321 1732 gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
e677137d
PB
1733 i = 0;
1734 } else { /* WLDRW wRd */
29531141 1735 tmp = tcg_temp_new_i32();
12dcc321 1736 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1737 }
1738 } else {
29531141 1739 tmp = tcg_temp_new_i32();
e677137d 1740 if (insn & (1 << 22)) { /* WLDRH */
12dcc321 1741 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
e677137d 1742 } else { /* WLDRB */
12dcc321 1743 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
e677137d
PB
1744 }
1745 }
1746 if (i) {
1747 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1748 tcg_temp_free_i32(tmp);
e677137d 1749 }
18c9b560
AZ
1750 gen_op_iwmmxt_movq_wRn_M0(wrd);
1751 }
1752 } else {
1753 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1754 tmp = iwmmxt_load_creg(wrd);
12dcc321 1755 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
18c9b560
AZ
1756 } else {
1757 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1758 tmp = tcg_temp_new_i32();
e677137d
PB
1759 if (insn & (1 << 8)) {
1760 if (insn & (1 << 22)) { /* WSTRD */
12dcc321 1761 gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
e677137d 1762 } else { /* WSTRW wRd */
ecc7b3aa 1763 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1764 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e677137d
PB
1765 }
1766 } else {
1767 if (insn & (1 << 22)) { /* WSTRH */
ecc7b3aa 1768 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1769 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
e677137d 1770 } else { /* WSTRB */
ecc7b3aa 1771 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
12dcc321 1772 gen_aa32_st8(s, tmp, addr, get_mem_index(s));
e677137d
PB
1773 }
1774 }
18c9b560 1775 }
29531141 1776 tcg_temp_free_i32(tmp);
18c9b560 1777 }
7d1b0095 1778 tcg_temp_free_i32(addr);
18c9b560
AZ
1779 return 0;
1780 }
1781
1782 if ((insn & 0x0f000000) != 0x0e000000)
1783 return 1;
1784
1785 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1786 case 0x000: /* WOR */
1787 wrd = (insn >> 12) & 0xf;
1788 rd0 = (insn >> 0) & 0xf;
1789 rd1 = (insn >> 16) & 0xf;
1790 gen_op_iwmmxt_movq_M0_wRn(rd0);
1791 gen_op_iwmmxt_orq_M0_wRn(rd1);
1792 gen_op_iwmmxt_setpsr_nz();
1793 gen_op_iwmmxt_movq_wRn_M0(wrd);
1794 gen_op_iwmmxt_set_mup();
1795 gen_op_iwmmxt_set_cup();
1796 break;
1797 case 0x011: /* TMCR */
1798 if (insn & 0xf)
1799 return 1;
1800 rd = (insn >> 12) & 0xf;
1801 wrd = (insn >> 16) & 0xf;
1802 switch (wrd) {
1803 case ARM_IWMMXT_wCID:
1804 case ARM_IWMMXT_wCASF:
1805 break;
1806 case ARM_IWMMXT_wCon:
1807 gen_op_iwmmxt_set_cup();
1808 /* Fall through. */
1809 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1810 tmp = iwmmxt_load_creg(wrd);
1811 tmp2 = load_reg(s, rd);
f669df27 1812 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1813 tcg_temp_free_i32(tmp2);
da6b5335 1814 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1815 break;
1816 case ARM_IWMMXT_wCGR0:
1817 case ARM_IWMMXT_wCGR1:
1818 case ARM_IWMMXT_wCGR2:
1819 case ARM_IWMMXT_wCGR3:
1820 gen_op_iwmmxt_set_cup();
da6b5335
FN
1821 tmp = load_reg(s, rd);
1822 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1823 break;
1824 default:
1825 return 1;
1826 }
1827 break;
1828 case 0x100: /* WXOR */
1829 wrd = (insn >> 12) & 0xf;
1830 rd0 = (insn >> 0) & 0xf;
1831 rd1 = (insn >> 16) & 0xf;
1832 gen_op_iwmmxt_movq_M0_wRn(rd0);
1833 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1834 gen_op_iwmmxt_setpsr_nz();
1835 gen_op_iwmmxt_movq_wRn_M0(wrd);
1836 gen_op_iwmmxt_set_mup();
1837 gen_op_iwmmxt_set_cup();
1838 break;
1839 case 0x111: /* TMRC */
1840 if (insn & 0xf)
1841 return 1;
1842 rd = (insn >> 12) & 0xf;
1843 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1844 tmp = iwmmxt_load_creg(wrd);
1845 store_reg(s, rd, tmp);
18c9b560
AZ
1846 break;
1847 case 0x300: /* WANDN */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 0) & 0xf;
1850 rd1 = (insn >> 16) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1852 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1853 gen_op_iwmmxt_andq_M0_wRn(rd1);
1854 gen_op_iwmmxt_setpsr_nz();
1855 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup();
1857 gen_op_iwmmxt_set_cup();
1858 break;
1859 case 0x200: /* WAND */
1860 wrd = (insn >> 12) & 0xf;
1861 rd0 = (insn >> 0) & 0xf;
1862 rd1 = (insn >> 16) & 0xf;
1863 gen_op_iwmmxt_movq_M0_wRn(rd0);
1864 gen_op_iwmmxt_andq_M0_wRn(rd1);
1865 gen_op_iwmmxt_setpsr_nz();
1866 gen_op_iwmmxt_movq_wRn_M0(wrd);
1867 gen_op_iwmmxt_set_mup();
1868 gen_op_iwmmxt_set_cup();
1869 break;
1870 case 0x810: case 0xa10: /* WMADD */
1871 wrd = (insn >> 12) & 0xf;
1872 rd0 = (insn >> 0) & 0xf;
1873 rd1 = (insn >> 16) & 0xf;
1874 gen_op_iwmmxt_movq_M0_wRn(rd0);
1875 if (insn & (1 << 21))
1876 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1877 else
1878 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1879 gen_op_iwmmxt_movq_wRn_M0(wrd);
1880 gen_op_iwmmxt_set_mup();
1881 break;
1882 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1883 wrd = (insn >> 12) & 0xf;
1884 rd0 = (insn >> 16) & 0xf;
1885 rd1 = (insn >> 0) & 0xf;
1886 gen_op_iwmmxt_movq_M0_wRn(rd0);
1887 switch ((insn >> 22) & 3) {
1888 case 0:
1889 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1890 break;
1891 case 1:
1892 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1893 break;
1894 case 2:
1895 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1896 break;
1897 case 3:
1898 return 1;
1899 }
1900 gen_op_iwmmxt_movq_wRn_M0(wrd);
1901 gen_op_iwmmxt_set_mup();
1902 gen_op_iwmmxt_set_cup();
1903 break;
1904 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1905 wrd = (insn >> 12) & 0xf;
1906 rd0 = (insn >> 16) & 0xf;
1907 rd1 = (insn >> 0) & 0xf;
1908 gen_op_iwmmxt_movq_M0_wRn(rd0);
1909 switch ((insn >> 22) & 3) {
1910 case 0:
1911 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1912 break;
1913 case 1:
1914 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1915 break;
1916 case 2:
1917 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1918 break;
1919 case 3:
1920 return 1;
1921 }
1922 gen_op_iwmmxt_movq_wRn_M0(wrd);
1923 gen_op_iwmmxt_set_mup();
1924 gen_op_iwmmxt_set_cup();
1925 break;
1926 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1927 wrd = (insn >> 12) & 0xf;
1928 rd0 = (insn >> 16) & 0xf;
1929 rd1 = (insn >> 0) & 0xf;
1930 gen_op_iwmmxt_movq_M0_wRn(rd0);
1931 if (insn & (1 << 22))
1932 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1933 else
1934 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1935 if (!(insn & (1 << 20)))
1936 gen_op_iwmmxt_addl_M0_wRn(wrd);
1937 gen_op_iwmmxt_movq_wRn_M0(wrd);
1938 gen_op_iwmmxt_set_mup();
1939 break;
1940 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1941 wrd = (insn >> 12) & 0xf;
1942 rd0 = (insn >> 16) & 0xf;
1943 rd1 = (insn >> 0) & 0xf;
1944 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1945 if (insn & (1 << 21)) {
1946 if (insn & (1 << 20))
1947 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1948 else
1949 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1950 } else {
1951 if (insn & (1 << 20))
1952 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1953 else
1954 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1955 }
18c9b560
AZ
1956 gen_op_iwmmxt_movq_wRn_M0(wrd);
1957 gen_op_iwmmxt_set_mup();
1958 break;
1959 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1960 wrd = (insn >> 12) & 0xf;
1961 rd0 = (insn >> 16) & 0xf;
1962 rd1 = (insn >> 0) & 0xf;
1963 gen_op_iwmmxt_movq_M0_wRn(rd0);
1964 if (insn & (1 << 21))
1965 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1966 else
1967 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1968 if (!(insn & (1 << 20))) {
e677137d
PB
1969 iwmmxt_load_reg(cpu_V1, wrd);
1970 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1971 }
1972 gen_op_iwmmxt_movq_wRn_M0(wrd);
1973 gen_op_iwmmxt_set_mup();
1974 break;
1975 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 rd1 = (insn >> 0) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
1980 switch ((insn >> 22) & 3) {
1981 case 0:
1982 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1983 break;
1984 case 1:
1985 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1986 break;
1987 case 2:
1988 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1989 break;
1990 case 3:
1991 return 1;
1992 }
1993 gen_op_iwmmxt_movq_wRn_M0(wrd);
1994 gen_op_iwmmxt_set_mup();
1995 gen_op_iwmmxt_set_cup();
1996 break;
1997 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 rd1 = (insn >> 0) & 0xf;
2001 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
2002 if (insn & (1 << 22)) {
2003 if (insn & (1 << 20))
2004 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
2005 else
2006 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
2007 } else {
2008 if (insn & (1 << 20))
2009 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
2010 else
2011 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
2012 }
18c9b560
AZ
2013 gen_op_iwmmxt_movq_wRn_M0(wrd);
2014 gen_op_iwmmxt_set_mup();
2015 gen_op_iwmmxt_set_cup();
2016 break;
2017 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
2018 wrd = (insn >> 12) & 0xf;
2019 rd0 = (insn >> 16) & 0xf;
2020 rd1 = (insn >> 0) & 0xf;
2021 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2022 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
2023 tcg_gen_andi_i32(tmp, tmp, 7);
2024 iwmmxt_load_reg(cpu_V1, rd1);
2025 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 2026 tcg_temp_free_i32(tmp);
18c9b560
AZ
2027 gen_op_iwmmxt_movq_wRn_M0(wrd);
2028 gen_op_iwmmxt_set_mup();
2029 break;
2030 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
2031 if (((insn >> 6) & 3) == 3)
2032 return 1;
18c9b560
AZ
2033 rd = (insn >> 12) & 0xf;
2034 wrd = (insn >> 16) & 0xf;
da6b5335 2035 tmp = load_reg(s, rd);
18c9b560
AZ
2036 gen_op_iwmmxt_movq_M0_wRn(wrd);
2037 switch ((insn >> 6) & 3) {
2038 case 0:
da6b5335
FN
2039 tmp2 = tcg_const_i32(0xff);
2040 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
2041 break;
2042 case 1:
da6b5335
FN
2043 tmp2 = tcg_const_i32(0xffff);
2044 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
2045 break;
2046 case 2:
da6b5335
FN
2047 tmp2 = tcg_const_i32(0xffffffff);
2048 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 2049 break;
da6b5335 2050 default:
39d5492a
PM
2051 TCGV_UNUSED_I32(tmp2);
2052 TCGV_UNUSED_I32(tmp3);
18c9b560 2053 }
da6b5335 2054 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
2055 tcg_temp_free_i32(tmp3);
2056 tcg_temp_free_i32(tmp2);
7d1b0095 2057 tcg_temp_free_i32(tmp);
18c9b560
AZ
2058 gen_op_iwmmxt_movq_wRn_M0(wrd);
2059 gen_op_iwmmxt_set_mup();
2060 break;
2061 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
2062 rd = (insn >> 12) & 0xf;
2063 wrd = (insn >> 16) & 0xf;
da6b5335 2064 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2065 return 1;
2066 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 2067 tmp = tcg_temp_new_i32();
18c9b560
AZ
2068 switch ((insn >> 22) & 3) {
2069 case 0:
da6b5335 2070 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
ecc7b3aa 2071 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2072 if (insn & 8) {
2073 tcg_gen_ext8s_i32(tmp, tmp);
2074 } else {
2075 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
2076 }
2077 break;
2078 case 1:
da6b5335 2079 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
ecc7b3aa 2080 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
da6b5335
FN
2081 if (insn & 8) {
2082 tcg_gen_ext16s_i32(tmp, tmp);
2083 } else {
2084 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
2085 }
2086 break;
2087 case 2:
da6b5335 2088 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
ecc7b3aa 2089 tcg_gen_extrl_i64_i32(tmp, cpu_M0);
18c9b560 2090 break;
18c9b560 2091 }
da6b5335 2092 store_reg(s, rd, tmp);
18c9b560
AZ
2093 break;
2094 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 2095 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2096 return 1;
da6b5335 2097 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
2098 switch ((insn >> 22) & 3) {
2099 case 0:
da6b5335 2100 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
2101 break;
2102 case 1:
da6b5335 2103 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
2104 break;
2105 case 2:
da6b5335 2106 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 2107 break;
18c9b560 2108 }
da6b5335
FN
2109 tcg_gen_shli_i32(tmp, tmp, 28);
2110 gen_set_nzcv(tmp);
7d1b0095 2111 tcg_temp_free_i32(tmp);
18c9b560
AZ
2112 break;
2113 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
2114 if (((insn >> 6) & 3) == 3)
2115 return 1;
18c9b560
AZ
2116 rd = (insn >> 12) & 0xf;
2117 wrd = (insn >> 16) & 0xf;
da6b5335 2118 tmp = load_reg(s, rd);
18c9b560
AZ
2119 switch ((insn >> 6) & 3) {
2120 case 0:
da6b5335 2121 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
2122 break;
2123 case 1:
da6b5335 2124 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
2125 break;
2126 case 2:
da6b5335 2127 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2128 break;
18c9b560 2129 }
7d1b0095 2130 tcg_temp_free_i32(tmp);
18c9b560
AZ
2131 gen_op_iwmmxt_movq_wRn_M0(wrd);
2132 gen_op_iwmmxt_set_mup();
2133 break;
2134 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2135 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2136 return 1;
da6b5335 2137 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2138 tmp2 = tcg_temp_new_i32();
da6b5335 2139 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2140 switch ((insn >> 22) & 3) {
2141 case 0:
2142 for (i = 0; i < 7; i ++) {
da6b5335
FN
2143 tcg_gen_shli_i32(tmp2, tmp2, 4);
2144 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2145 }
2146 break;
2147 case 1:
2148 for (i = 0; i < 3; i ++) {
da6b5335
FN
2149 tcg_gen_shli_i32(tmp2, tmp2, 8);
2150 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2151 }
2152 break;
2153 case 2:
da6b5335
FN
2154 tcg_gen_shli_i32(tmp2, tmp2, 16);
2155 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2156 break;
18c9b560 2157 }
da6b5335 2158 gen_set_nzcv(tmp);
7d1b0095
PM
2159 tcg_temp_free_i32(tmp2);
2160 tcg_temp_free_i32(tmp);
18c9b560
AZ
2161 break;
2162 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2163 wrd = (insn >> 12) & 0xf;
2164 rd0 = (insn >> 16) & 0xf;
2165 gen_op_iwmmxt_movq_M0_wRn(rd0);
2166 switch ((insn >> 22) & 3) {
2167 case 0:
e677137d 2168 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2169 break;
2170 case 1:
e677137d 2171 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2172 break;
2173 case 2:
e677137d 2174 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2175 break;
2176 case 3:
2177 return 1;
2178 }
2179 gen_op_iwmmxt_movq_wRn_M0(wrd);
2180 gen_op_iwmmxt_set_mup();
2181 break;
2182 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2183 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2184 return 1;
da6b5335 2185 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2186 tmp2 = tcg_temp_new_i32();
da6b5335 2187 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2188 switch ((insn >> 22) & 3) {
2189 case 0:
2190 for (i = 0; i < 7; i ++) {
da6b5335
FN
2191 tcg_gen_shli_i32(tmp2, tmp2, 4);
2192 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2193 }
2194 break;
2195 case 1:
2196 for (i = 0; i < 3; i ++) {
da6b5335
FN
2197 tcg_gen_shli_i32(tmp2, tmp2, 8);
2198 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2199 }
2200 break;
2201 case 2:
da6b5335
FN
2202 tcg_gen_shli_i32(tmp2, tmp2, 16);
2203 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2204 break;
18c9b560 2205 }
da6b5335 2206 gen_set_nzcv(tmp);
7d1b0095
PM
2207 tcg_temp_free_i32(tmp2);
2208 tcg_temp_free_i32(tmp);
18c9b560
AZ
2209 break;
2210 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2211 rd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
da6b5335 2213 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2214 return 1;
2215 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2216 tmp = tcg_temp_new_i32();
18c9b560
AZ
2217 switch ((insn >> 22) & 3) {
2218 case 0:
da6b5335 2219 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2220 break;
2221 case 1:
da6b5335 2222 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2223 break;
2224 case 2:
da6b5335 2225 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2226 break;
18c9b560 2227 }
da6b5335 2228 store_reg(s, rd, tmp);
18c9b560
AZ
2229 break;
2230 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2231 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2232 wrd = (insn >> 12) & 0xf;
2233 rd0 = (insn >> 16) & 0xf;
2234 rd1 = (insn >> 0) & 0xf;
2235 gen_op_iwmmxt_movq_M0_wRn(rd0);
2236 switch ((insn >> 22) & 3) {
2237 case 0:
2238 if (insn & (1 << 21))
2239 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2240 else
2241 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2242 break;
2243 case 1:
2244 if (insn & (1 << 21))
2245 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2246 else
2247 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2248 break;
2249 case 2:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2252 else
2253 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2254 break;
2255 case 3:
2256 return 1;
2257 }
2258 gen_op_iwmmxt_movq_wRn_M0(wrd);
2259 gen_op_iwmmxt_set_mup();
2260 gen_op_iwmmxt_set_cup();
2261 break;
2262 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2263 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2264 wrd = (insn >> 12) & 0xf;
2265 rd0 = (insn >> 16) & 0xf;
2266 gen_op_iwmmxt_movq_M0_wRn(rd0);
2267 switch ((insn >> 22) & 3) {
2268 case 0:
2269 if (insn & (1 << 21))
2270 gen_op_iwmmxt_unpacklsb_M0();
2271 else
2272 gen_op_iwmmxt_unpacklub_M0();
2273 break;
2274 case 1:
2275 if (insn & (1 << 21))
2276 gen_op_iwmmxt_unpacklsw_M0();
2277 else
2278 gen_op_iwmmxt_unpackluw_M0();
2279 break;
2280 case 2:
2281 if (insn & (1 << 21))
2282 gen_op_iwmmxt_unpacklsl_M0();
2283 else
2284 gen_op_iwmmxt_unpacklul_M0();
2285 break;
2286 case 3:
2287 return 1;
2288 }
2289 gen_op_iwmmxt_movq_wRn_M0(wrd);
2290 gen_op_iwmmxt_set_mup();
2291 gen_op_iwmmxt_set_cup();
2292 break;
2293 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2294 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2295 wrd = (insn >> 12) & 0xf;
2296 rd0 = (insn >> 16) & 0xf;
2297 gen_op_iwmmxt_movq_M0_wRn(rd0);
2298 switch ((insn >> 22) & 3) {
2299 case 0:
2300 if (insn & (1 << 21))
2301 gen_op_iwmmxt_unpackhsb_M0();
2302 else
2303 gen_op_iwmmxt_unpackhub_M0();
2304 break;
2305 case 1:
2306 if (insn & (1 << 21))
2307 gen_op_iwmmxt_unpackhsw_M0();
2308 else
2309 gen_op_iwmmxt_unpackhuw_M0();
2310 break;
2311 case 2:
2312 if (insn & (1 << 21))
2313 gen_op_iwmmxt_unpackhsl_M0();
2314 else
2315 gen_op_iwmmxt_unpackhul_M0();
2316 break;
2317 case 3:
2318 return 1;
2319 }
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2325 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2326 if (((insn >> 22) & 3) == 0)
2327 return 1;
18c9b560
AZ
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2331 tmp = tcg_temp_new_i32();
da6b5335 2332 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2333 tcg_temp_free_i32(tmp);
18c9b560 2334 return 1;
da6b5335 2335 }
18c9b560 2336 switch ((insn >> 22) & 3) {
18c9b560 2337 case 1:
477955bd 2338 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2339 break;
2340 case 2:
477955bd 2341 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2342 break;
2343 case 3:
477955bd 2344 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2345 break;
2346 }
7d1b0095 2347 tcg_temp_free_i32(tmp);
18c9b560
AZ
2348 gen_op_iwmmxt_movq_wRn_M0(wrd);
2349 gen_op_iwmmxt_set_mup();
2350 gen_op_iwmmxt_set_cup();
2351 break;
2352 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2353 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2354 if (((insn >> 22) & 3) == 0)
2355 return 1;
18c9b560
AZ
2356 wrd = (insn >> 12) & 0xf;
2357 rd0 = (insn >> 16) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2359 tmp = tcg_temp_new_i32();
da6b5335 2360 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2361 tcg_temp_free_i32(tmp);
18c9b560 2362 return 1;
da6b5335 2363 }
18c9b560 2364 switch ((insn >> 22) & 3) {
18c9b560 2365 case 1:
477955bd 2366 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2367 break;
2368 case 2:
477955bd 2369 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2370 break;
2371 case 3:
477955bd 2372 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2373 break;
2374 }
7d1b0095 2375 tcg_temp_free_i32(tmp);
18c9b560
AZ
2376 gen_op_iwmmxt_movq_wRn_M0(wrd);
2377 gen_op_iwmmxt_set_mup();
2378 gen_op_iwmmxt_set_cup();
2379 break;
2380 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2381 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2382 if (((insn >> 22) & 3) == 0)
2383 return 1;
18c9b560
AZ
2384 wrd = (insn >> 12) & 0xf;
2385 rd0 = (insn >> 16) & 0xf;
2386 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2387 tmp = tcg_temp_new_i32();
da6b5335 2388 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2389 tcg_temp_free_i32(tmp);
18c9b560 2390 return 1;
da6b5335 2391 }
18c9b560 2392 switch ((insn >> 22) & 3) {
18c9b560 2393 case 1:
477955bd 2394 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2395 break;
2396 case 2:
477955bd 2397 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2398 break;
2399 case 3:
477955bd 2400 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2401 break;
2402 }
7d1b0095 2403 tcg_temp_free_i32(tmp);
18c9b560
AZ
2404 gen_op_iwmmxt_movq_wRn_M0(wrd);
2405 gen_op_iwmmxt_set_mup();
2406 gen_op_iwmmxt_set_cup();
2407 break;
2408 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2409 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2410 if (((insn >> 22) & 3) == 0)
2411 return 1;
18c9b560
AZ
2412 wrd = (insn >> 12) & 0xf;
2413 rd0 = (insn >> 16) & 0xf;
2414 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2415 tmp = tcg_temp_new_i32();
18c9b560 2416 switch ((insn >> 22) & 3) {
18c9b560 2417 case 1:
da6b5335 2418 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2419 tcg_temp_free_i32(tmp);
18c9b560 2420 return 1;
da6b5335 2421 }
477955bd 2422 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2423 break;
2424 case 2:
da6b5335 2425 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2426 tcg_temp_free_i32(tmp);
18c9b560 2427 return 1;
da6b5335 2428 }
477955bd 2429 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2430 break;
2431 case 3:
da6b5335 2432 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2433 tcg_temp_free_i32(tmp);
18c9b560 2434 return 1;
da6b5335 2435 }
477955bd 2436 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2437 break;
2438 }
7d1b0095 2439 tcg_temp_free_i32(tmp);
18c9b560
AZ
2440 gen_op_iwmmxt_movq_wRn_M0(wrd);
2441 gen_op_iwmmxt_set_mup();
2442 gen_op_iwmmxt_set_cup();
2443 break;
2444 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2445 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2446 wrd = (insn >> 12) & 0xf;
2447 rd0 = (insn >> 16) & 0xf;
2448 rd1 = (insn >> 0) & 0xf;
2449 gen_op_iwmmxt_movq_M0_wRn(rd0);
2450 switch ((insn >> 22) & 3) {
2451 case 0:
2452 if (insn & (1 << 21))
2453 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2454 else
2455 gen_op_iwmmxt_minub_M0_wRn(rd1);
2456 break;
2457 case 1:
2458 if (insn & (1 << 21))
2459 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2460 else
2461 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2462 break;
2463 case 2:
2464 if (insn & (1 << 21))
2465 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2466 else
2467 gen_op_iwmmxt_minul_M0_wRn(rd1);
2468 break;
2469 case 3:
2470 return 1;
2471 }
2472 gen_op_iwmmxt_movq_wRn_M0(wrd);
2473 gen_op_iwmmxt_set_mup();
2474 break;
2475 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2476 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2477 wrd = (insn >> 12) & 0xf;
2478 rd0 = (insn >> 16) & 0xf;
2479 rd1 = (insn >> 0) & 0xf;
2480 gen_op_iwmmxt_movq_M0_wRn(rd0);
2481 switch ((insn >> 22) & 3) {
2482 case 0:
2483 if (insn & (1 << 21))
2484 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2485 else
2486 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2487 break;
2488 case 1:
2489 if (insn & (1 << 21))
2490 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2491 else
2492 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2493 break;
2494 case 2:
2495 if (insn & (1 << 21))
2496 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2497 else
2498 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2499 break;
2500 case 3:
2501 return 1;
2502 }
2503 gen_op_iwmmxt_movq_wRn_M0(wrd);
2504 gen_op_iwmmxt_set_mup();
2505 break;
2506 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2507 case 0x402: case 0x502: case 0x602: case 0x702:
2508 wrd = (insn >> 12) & 0xf;
2509 rd0 = (insn >> 16) & 0xf;
2510 rd1 = (insn >> 0) & 0xf;
2511 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2512 tmp = tcg_const_i32((insn >> 20) & 3);
2513 iwmmxt_load_reg(cpu_V1, rd1);
2514 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2515 tcg_temp_free_i32(tmp);
18c9b560
AZ
2516 gen_op_iwmmxt_movq_wRn_M0(wrd);
2517 gen_op_iwmmxt_set_mup();
2518 break;
2519 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2520 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2521 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2522 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2523 wrd = (insn >> 12) & 0xf;
2524 rd0 = (insn >> 16) & 0xf;
2525 rd1 = (insn >> 0) & 0xf;
2526 gen_op_iwmmxt_movq_M0_wRn(rd0);
2527 switch ((insn >> 20) & 0xf) {
2528 case 0x0:
2529 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2530 break;
2531 case 0x1:
2532 gen_op_iwmmxt_subub_M0_wRn(rd1);
2533 break;
2534 case 0x3:
2535 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2536 break;
2537 case 0x4:
2538 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2539 break;
2540 case 0x5:
2541 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2542 break;
2543 case 0x7:
2544 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2545 break;
2546 case 0x8:
2547 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2548 break;
2549 case 0x9:
2550 gen_op_iwmmxt_subul_M0_wRn(rd1);
2551 break;
2552 case 0xb:
2553 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2554 break;
2555 default:
2556 return 1;
2557 }
2558 gen_op_iwmmxt_movq_wRn_M0(wrd);
2559 gen_op_iwmmxt_set_mup();
2560 gen_op_iwmmxt_set_cup();
2561 break;
2562 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2563 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2564 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2565 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2566 wrd = (insn >> 12) & 0xf;
2567 rd0 = (insn >> 16) & 0xf;
2568 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2569 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2570 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2571 tcg_temp_free_i32(tmp);
18c9b560
AZ
2572 gen_op_iwmmxt_movq_wRn_M0(wrd);
2573 gen_op_iwmmxt_set_mup();
2574 gen_op_iwmmxt_set_cup();
2575 break;
2576 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2577 case 0x418: case 0x518: case 0x618: case 0x718:
2578 case 0x818: case 0x918: case 0xa18: case 0xb18:
2579 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2580 wrd = (insn >> 12) & 0xf;
2581 rd0 = (insn >> 16) & 0xf;
2582 rd1 = (insn >> 0) & 0xf;
2583 gen_op_iwmmxt_movq_M0_wRn(rd0);
2584 switch ((insn >> 20) & 0xf) {
2585 case 0x0:
2586 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2587 break;
2588 case 0x1:
2589 gen_op_iwmmxt_addub_M0_wRn(rd1);
2590 break;
2591 case 0x3:
2592 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2593 break;
2594 case 0x4:
2595 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2596 break;
2597 case 0x5:
2598 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2599 break;
2600 case 0x7:
2601 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2602 break;
2603 case 0x8:
2604 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2605 break;
2606 case 0x9:
2607 gen_op_iwmmxt_addul_M0_wRn(rd1);
2608 break;
2609 case 0xb:
2610 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2611 break;
2612 default:
2613 return 1;
2614 }
2615 gen_op_iwmmxt_movq_wRn_M0(wrd);
2616 gen_op_iwmmxt_set_mup();
2617 gen_op_iwmmxt_set_cup();
2618 break;
2619 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2620 case 0x408: case 0x508: case 0x608: case 0x708:
2621 case 0x808: case 0x908: case 0xa08: case 0xb08:
2622 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2623 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2624 return 1;
18c9b560
AZ
2625 wrd = (insn >> 12) & 0xf;
2626 rd0 = (insn >> 16) & 0xf;
2627 rd1 = (insn >> 0) & 0xf;
2628 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2629 switch ((insn >> 22) & 3) {
18c9b560
AZ
2630 case 1:
2631 if (insn & (1 << 21))
2632 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2633 else
2634 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2635 break;
2636 case 2:
2637 if (insn & (1 << 21))
2638 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2639 else
2640 gen_op_iwmmxt_packul_M0_wRn(rd1);
2641 break;
2642 case 3:
2643 if (insn & (1 << 21))
2644 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2645 else
2646 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2647 break;
2648 }
2649 gen_op_iwmmxt_movq_wRn_M0(wrd);
2650 gen_op_iwmmxt_set_mup();
2651 gen_op_iwmmxt_set_cup();
2652 break;
2653 case 0x201: case 0x203: case 0x205: case 0x207:
2654 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2655 case 0x211: case 0x213: case 0x215: case 0x217:
2656 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2657 wrd = (insn >> 5) & 0xf;
2658 rd0 = (insn >> 12) & 0xf;
2659 rd1 = (insn >> 0) & 0xf;
2660 if (rd0 == 0xf || rd1 == 0xf)
2661 return 1;
2662 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2663 tmp = load_reg(s, rd0);
2664 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2665 switch ((insn >> 16) & 0xf) {
2666 case 0x0: /* TMIA */
da6b5335 2667 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2668 break;
2669 case 0x8: /* TMIAPH */
da6b5335 2670 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2671 break;
2672 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2673 if (insn & (1 << 16))
da6b5335 2674 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2675 if (insn & (1 << 17))
da6b5335
FN
2676 tcg_gen_shri_i32(tmp2, tmp2, 16);
2677 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2678 break;
2679 default:
7d1b0095
PM
2680 tcg_temp_free_i32(tmp2);
2681 tcg_temp_free_i32(tmp);
18c9b560
AZ
2682 return 1;
2683 }
7d1b0095
PM
2684 tcg_temp_free_i32(tmp2);
2685 tcg_temp_free_i32(tmp);
18c9b560
AZ
2686 gen_op_iwmmxt_movq_wRn_M0(wrd);
2687 gen_op_iwmmxt_set_mup();
2688 break;
2689 default:
2690 return 1;
2691 }
2692
2693 return 0;
2694}
2695
a1c7273b 2696/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2697 (ie. an undefined instruction). */
7dcc1f89 2698static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2699{
2700 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2701 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2702
2703 if ((insn & 0x0ff00f10) == 0x0e200010) {
2704 /* Multiply with Internal Accumulate Format */
2705 rd0 = (insn >> 12) & 0xf;
2706 rd1 = insn & 0xf;
2707 acc = (insn >> 5) & 7;
2708
2709 if (acc != 0)
2710 return 1;
2711
3a554c0f
FN
2712 tmp = load_reg(s, rd0);
2713 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2714 switch ((insn >> 16) & 0xf) {
2715 case 0x0: /* MIA */
3a554c0f 2716 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2717 break;
2718 case 0x8: /* MIAPH */
3a554c0f 2719 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2720 break;
2721 case 0xc: /* MIABB */
2722 case 0xd: /* MIABT */
2723 case 0xe: /* MIATB */
2724 case 0xf: /* MIATT */
18c9b560 2725 if (insn & (1 << 16))
3a554c0f 2726 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2727 if (insn & (1 << 17))
3a554c0f
FN
2728 tcg_gen_shri_i32(tmp2, tmp2, 16);
2729 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2730 break;
2731 default:
2732 return 1;
2733 }
7d1b0095
PM
2734 tcg_temp_free_i32(tmp2);
2735 tcg_temp_free_i32(tmp);
18c9b560
AZ
2736
2737 gen_op_iwmmxt_movq_wRn_M0(acc);
2738 return 0;
2739 }
2740
2741 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2742 /* Internal Accumulator Access Format */
2743 rdhi = (insn >> 16) & 0xf;
2744 rdlo = (insn >> 12) & 0xf;
2745 acc = insn & 7;
2746
2747 if (acc != 0)
2748 return 1;
2749
2750 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f 2751 iwmmxt_load_reg(cpu_V0, acc);
ecc7b3aa 2752 tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
3a554c0f 2753 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 2754 tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
3a554c0f 2755 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2756 } else { /* MAR */
3a554c0f
FN
2757 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2758 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2759 }
2760 return 0;
2761 }
2762
2763 return 1;
2764}
2765
/*
 * Decode helpers for VFP register fields.  On VFP3-and-later cores the
 * D registers are numbered 0..31, with the top bit of the register number
 * carried in a separate one-bit field ("smallbit"); earlier cores only
 * have D0..D15 and a set smallbit makes the encoding UNDEFINED (return 1).
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* The D/N/M register fields live at fixed bit positions in the insn. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2785
4373f3ce 2786/* Move between integer and VFP cores. */
39d5492a 2787static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2788{
39d5492a 2789 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2790 tcg_gen_mov_i32(tmp, cpu_F0s);
2791 return tmp;
2792}
2793
39d5492a 2794static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2795{
2796 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2797 tcg_temp_free_i32(tmp);
4373f3ce
PB
2798}
2799
39d5492a 2800static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2801{
39d5492a 2802 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2803 if (shift)
2804 tcg_gen_shri_i32(var, var, shift);
86831435 2805 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2806 tcg_gen_shli_i32(tmp, var, 8);
2807 tcg_gen_or_i32(var, var, tmp);
2808 tcg_gen_shli_i32(tmp, var, 16);
2809 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2810 tcg_temp_free_i32(tmp);
ad69471c
PB
2811}
2812
39d5492a 2813static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2814{
39d5492a 2815 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2816 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2817 tcg_gen_shli_i32(tmp, var, 16);
2818 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2819 tcg_temp_free_i32(tmp);
ad69471c
PB
2820}
2821
39d5492a 2822static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2823{
39d5492a 2824 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2825 tcg_gen_andi_i32(var, var, 0xffff0000);
2826 tcg_gen_shri_i32(tmp, var, 16);
2827 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2828 tcg_temp_free_i32(tmp);
ad69471c
PB
2829}
2830
39d5492a 2831static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2832{
2833 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2834 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2835 switch (size) {
2836 case 0:
12dcc321 2837 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2838 gen_neon_dup_u8(tmp, 0);
2839 break;
2840 case 1:
12dcc321 2841 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2842 gen_neon_dup_low16(tmp);
2843 break;
2844 case 2:
12dcc321 2845 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
8e18cde3
PM
2846 break;
2847 default: /* Avoid compiler warnings. */
2848 abort();
2849 }
2850 return tmp;
2851}
2852
04731fb5
WN
2853static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
2854 uint32_t dp)
2855{
2856 uint32_t cc = extract32(insn, 20, 2);
2857
2858 if (dp) {
2859 TCGv_i64 frn, frm, dest;
2860 TCGv_i64 tmp, zero, zf, nf, vf;
2861
2862 zero = tcg_const_i64(0);
2863
2864 frn = tcg_temp_new_i64();
2865 frm = tcg_temp_new_i64();
2866 dest = tcg_temp_new_i64();
2867
2868 zf = tcg_temp_new_i64();
2869 nf = tcg_temp_new_i64();
2870 vf = tcg_temp_new_i64();
2871
2872 tcg_gen_extu_i32_i64(zf, cpu_ZF);
2873 tcg_gen_ext_i32_i64(nf, cpu_NF);
2874 tcg_gen_ext_i32_i64(vf, cpu_VF);
2875
2876 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2877 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2878 switch (cc) {
2879 case 0: /* eq: Z */
2880 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
2881 frn, frm);
2882 break;
2883 case 1: /* vs: V */
2884 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
2885 frn, frm);
2886 break;
2887 case 2: /* ge: N == V -> N ^ V == 0 */
2888 tmp = tcg_temp_new_i64();
2889 tcg_gen_xor_i64(tmp, vf, nf);
2890 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2891 frn, frm);
2892 tcg_temp_free_i64(tmp);
2893 break;
2894 case 3: /* gt: !Z && N == V */
2895 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
2896 frn, frm);
2897 tmp = tcg_temp_new_i64();
2898 tcg_gen_xor_i64(tmp, vf, nf);
2899 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
2900 dest, frm);
2901 tcg_temp_free_i64(tmp);
2902 break;
2903 }
2904 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2905 tcg_temp_free_i64(frn);
2906 tcg_temp_free_i64(frm);
2907 tcg_temp_free_i64(dest);
2908
2909 tcg_temp_free_i64(zf);
2910 tcg_temp_free_i64(nf);
2911 tcg_temp_free_i64(vf);
2912
2913 tcg_temp_free_i64(zero);
2914 } else {
2915 TCGv_i32 frn, frm, dest;
2916 TCGv_i32 tmp, zero;
2917
2918 zero = tcg_const_i32(0);
2919
2920 frn = tcg_temp_new_i32();
2921 frm = tcg_temp_new_i32();
2922 dest = tcg_temp_new_i32();
2923 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2924 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2925 switch (cc) {
2926 case 0: /* eq: Z */
2927 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
2928 frn, frm);
2929 break;
2930 case 1: /* vs: V */
2931 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
2932 frn, frm);
2933 break;
2934 case 2: /* ge: N == V -> N ^ V == 0 */
2935 tmp = tcg_temp_new_i32();
2936 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2937 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2938 frn, frm);
2939 tcg_temp_free_i32(tmp);
2940 break;
2941 case 3: /* gt: !Z && N == V */
2942 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
2943 frn, frm);
2944 tmp = tcg_temp_new_i32();
2945 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
2946 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
2947 dest, frm);
2948 tcg_temp_free_i32(tmp);
2949 break;
2950 }
2951 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2952 tcg_temp_free_i32(frn);
2953 tcg_temp_free_i32(frm);
2954 tcg_temp_free_i32(dest);
2955
2956 tcg_temp_free_i32(zero);
2957 }
2958
2959 return 0;
2960}
2961
40cfacdd
WN
2962static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2963 uint32_t rm, uint32_t dp)
2964{
2965 uint32_t vmin = extract32(insn, 6, 1);
2966 TCGv_ptr fpst = get_fpstatus_ptr(0);
2967
2968 if (dp) {
2969 TCGv_i64 frn, frm, dest;
2970
2971 frn = tcg_temp_new_i64();
2972 frm = tcg_temp_new_i64();
2973 dest = tcg_temp_new_i64();
2974
2975 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2976 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2977 if (vmin) {
f71a2ae5 2978 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2979 } else {
f71a2ae5 2980 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2981 }
2982 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2983 tcg_temp_free_i64(frn);
2984 tcg_temp_free_i64(frm);
2985 tcg_temp_free_i64(dest);
2986 } else {
2987 TCGv_i32 frn, frm, dest;
2988
2989 frn = tcg_temp_new_i32();
2990 frm = tcg_temp_new_i32();
2991 dest = tcg_temp_new_i32();
2992
2993 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2994 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2995 if (vmin) {
f71a2ae5 2996 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2997 } else {
f71a2ae5 2998 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2999 }
3000 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
3001 tcg_temp_free_i32(frn);
3002 tcg_temp_free_i32(frm);
3003 tcg_temp_free_i32(dest);
3004 }
3005
3006 tcg_temp_free_ptr(fpst);
3007 return 0;
3008}
3009
7655f39b
WN
3010static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3011 int rounding)
3012{
3013 TCGv_ptr fpst = get_fpstatus_ptr(0);
3014 TCGv_i32 tcg_rmode;
3015
3016 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3017 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3018
3019 if (dp) {
3020 TCGv_i64 tcg_op;
3021 TCGv_i64 tcg_res;
3022 tcg_op = tcg_temp_new_i64();
3023 tcg_res = tcg_temp_new_i64();
3024 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3025 gen_helper_rintd(tcg_res, tcg_op, fpst);
3026 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3027 tcg_temp_free_i64(tcg_op);
3028 tcg_temp_free_i64(tcg_res);
3029 } else {
3030 TCGv_i32 tcg_op;
3031 TCGv_i32 tcg_res;
3032 tcg_op = tcg_temp_new_i32();
3033 tcg_res = tcg_temp_new_i32();
3034 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
3035 gen_helper_rints(tcg_res, tcg_op, fpst);
3036 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
3037 tcg_temp_free_i32(tcg_op);
3038 tcg_temp_free_i32(tcg_res);
3039 }
3040
3041 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3042 tcg_temp_free_i32(tcg_rmode);
3043
3044 tcg_temp_free_ptr(fpst);
3045 return 0;
3046}
3047
c9975a83
WN
3048static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
3049 int rounding)
3050{
3051 bool is_signed = extract32(insn, 7, 1);
3052 TCGv_ptr fpst = get_fpstatus_ptr(0);
3053 TCGv_i32 tcg_rmode, tcg_shift;
3054
3055 tcg_shift = tcg_const_i32(0);
3056
3057 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
3058 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3059
3060 if (dp) {
3061 TCGv_i64 tcg_double, tcg_res;
3062 TCGv_i32 tcg_tmp;
3063 /* Rd is encoded as a single precision register even when the source
3064 * is double precision.
3065 */
3066 rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
3067 tcg_double = tcg_temp_new_i64();
3068 tcg_res = tcg_temp_new_i64();
3069 tcg_tmp = tcg_temp_new_i32();
3070 tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
3071 if (is_signed) {
3072 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
3073 } else {
3074 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
3075 }
ecc7b3aa 3076 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
c9975a83
WN
3077 tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
3078 tcg_temp_free_i32(tcg_tmp);
3079 tcg_temp_free_i64(tcg_res);
3080 tcg_temp_free_i64(tcg_double);
3081 } else {
3082 TCGv_i32 tcg_single, tcg_res;
3083 tcg_single = tcg_temp_new_i32();
3084 tcg_res = tcg_temp_new_i32();
3085 tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
3086 if (is_signed) {
3087 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
3088 } else {
3089 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
3090 }
3091 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
3092 tcg_temp_free_i32(tcg_res);
3093 tcg_temp_free_i32(tcg_single);
3094 }
3095
3096 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3097 tcg_temp_free_i32(tcg_rmode);
3098
3099 tcg_temp_free_i32(tcg_shift);
3100
3101 tcg_temp_free_ptr(fpst);
3102
3103 return 0;
3104}
7655f39b
WN
3105
3106/* Table for converting the most common AArch32 encoding of
3107 * rounding mode to arm_fprounding order (which matches the
3108 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
3109 */
3110static const uint8_t fp_decode_rm[] = {
3111 FPROUNDING_TIEAWAY,
3112 FPROUNDING_TIEEVEN,
3113 FPROUNDING_POSINF,
3114 FPROUNDING_NEGINF,
3115};
3116
7dcc1f89 3117static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
3118{
3119 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
3120
d614a513 3121 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
3122 return 1;
3123 }
3124
3125 if (dp) {
3126 VFP_DREG_D(rd, insn);
3127 VFP_DREG_N(rn, insn);
3128 VFP_DREG_M(rm, insn);
3129 } else {
3130 rd = VFP_SREG_D(insn);
3131 rn = VFP_SREG_N(insn);
3132 rm = VFP_SREG_M(insn);
3133 }
3134
3135 if ((insn & 0x0f800e50) == 0x0e000a00) {
3136 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3137 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3138 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3139 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3140 /* VRINTA, VRINTN, VRINTP, VRINTM */
3141 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3142 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3143 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3144 /* VCVTA, VCVTN, VCVTP, VCVTM */
3145 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3146 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3147 }
3148 return 1;
3149}
3150
a1c7273b 3151/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 3152 (ie. an undefined instruction). */
7dcc1f89 3153static int disas_vfp_insn(DisasContext *s, uint32_t insn)
b7bcbe95
FB
3154{
3155 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
3156 int dp, veclen;
39d5492a
PM
3157 TCGv_i32 addr;
3158 TCGv_i32 tmp;
3159 TCGv_i32 tmp2;
b7bcbe95 3160
d614a513 3161 if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
40f137e1 3162 return 1;
d614a513 3163 }
40f137e1 3164
2c7ffc41
PM
3165 /* FIXME: this access check should not take precedence over UNDEF
3166 * for invalid encodings; we will generate incorrect syndrome information
3167 * for attempts to execute invalid vfp/neon encodings with FP disabled.
3168 */
9dbbc748 3169 if (s->fp_excp_el) {
2c7ffc41 3170 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 3171 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
3172 return 0;
3173 }
3174
5df8bac1 3175 if (!s->vfp_enabled) {
9ee6e8bb 3176 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
3177 if ((insn & 0x0fe00fff) != 0x0ee00a10)
3178 return 1;
3179 rn = (insn >> 16) & 0xf;
a50c0f51
PM
3180 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
3181 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
40f137e1 3182 return 1;
a50c0f51 3183 }
40f137e1 3184 }
6a57f3eb
WN
3185
3186 if (extract32(insn, 28, 4) == 0xf) {
3187 /* Encodings with T=1 (Thumb) or unconditional (ARM):
3188 * only used in v8 and above.
3189 */
7dcc1f89 3190 return disas_vfp_v8_insn(s, insn);
6a57f3eb
WN
3191 }
3192
b7bcbe95
FB
3193 dp = ((insn & 0xf00) == 0xb00);
3194 switch ((insn >> 24) & 0xf) {
3195 case 0xe:
3196 if (insn & (1 << 4)) {
3197 /* single register transfer */
b7bcbe95
FB
3198 rd = (insn >> 12) & 0xf;
3199 if (dp) {
9ee6e8bb
PB
3200 int size;
3201 int pass;
3202
3203 VFP_DREG_N(rn, insn);
3204 if (insn & 0xf)
b7bcbe95 3205 return 1;
9ee6e8bb 3206 if (insn & 0x00c00060
d614a513 3207 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 3208 return 1;
d614a513 3209 }
9ee6e8bb
PB
3210
3211 pass = (insn >> 21) & 1;
3212 if (insn & (1 << 22)) {
3213 size = 0;
3214 offset = ((insn >> 5) & 3) * 8;
3215 } else if (insn & (1 << 5)) {
3216 size = 1;
3217 offset = (insn & (1 << 6)) ? 16 : 0;
3218 } else {
3219 size = 2;
3220 offset = 0;
3221 }
18c9b560 3222 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3223 /* vfp->arm */
ad69471c 3224 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
3225 switch (size) {
3226 case 0:
9ee6e8bb 3227 if (offset)
ad69471c 3228 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 3229 if (insn & (1 << 23))
ad69471c 3230 gen_uxtb(tmp);
9ee6e8bb 3231 else
ad69471c 3232 gen_sxtb(tmp);
9ee6e8bb
PB
3233 break;
3234 case 1:
9ee6e8bb
PB
3235 if (insn & (1 << 23)) {
3236 if (offset) {
ad69471c 3237 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 3238 } else {
ad69471c 3239 gen_uxth(tmp);
9ee6e8bb
PB
3240 }
3241 } else {
3242 if (offset) {
ad69471c 3243 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 3244 } else {
ad69471c 3245 gen_sxth(tmp);
9ee6e8bb
PB
3246 }
3247 }
3248 break;
3249 case 2:
9ee6e8bb
PB
3250 break;
3251 }
ad69471c 3252 store_reg(s, rd, tmp);
b7bcbe95
FB
3253 } else {
3254 /* arm->vfp */
ad69471c 3255 tmp = load_reg(s, rd);
9ee6e8bb
PB
3256 if (insn & (1 << 23)) {
3257 /* VDUP */
3258 if (size == 0) {
ad69471c 3259 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 3260 } else if (size == 1) {
ad69471c 3261 gen_neon_dup_low16(tmp);
9ee6e8bb 3262 }
cbbccffc 3263 for (n = 0; n <= pass * 2; n++) {
7d1b0095 3264 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
3265 tcg_gen_mov_i32(tmp2, tmp);
3266 neon_store_reg(rn, n, tmp2);
3267 }
3268 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
3269 } else {
3270 /* VMOV */
3271 switch (size) {
3272 case 0:
ad69471c 3273 tmp2 = neon_load_reg(rn, pass);
d593c48e 3274 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 3275 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3276 break;
3277 case 1:
ad69471c 3278 tmp2 = neon_load_reg(rn, pass);
d593c48e 3279 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 3280 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3281 break;
3282 case 2:
9ee6e8bb
PB
3283 break;
3284 }
ad69471c 3285 neon_store_reg(rn, pass, tmp);
9ee6e8bb 3286 }
b7bcbe95 3287 }
9ee6e8bb
PB
3288 } else { /* !dp */
3289 if ((insn & 0x6f) != 0x00)
3290 return 1;
3291 rn = VFP_SREG_N(insn);
18c9b560 3292 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3293 /* vfp->arm */
3294 if (insn & (1 << 21)) {
3295 /* system register */
40f137e1 3296 rn >>= 1;
9ee6e8bb 3297
b7bcbe95 3298 switch (rn) {
40f137e1 3299 case ARM_VFP_FPSID:
4373f3ce 3300 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
3301 VFP3 restricts all id registers to privileged
3302 accesses. */
3303 if (IS_USER(s)
d614a513 3304 && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3305 return 1;
d614a513 3306 }
4373f3ce 3307 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3308 break;
40f137e1 3309 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3310 if (IS_USER(s))
3311 return 1;
4373f3ce 3312 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3313 break;
40f137e1
PB
3314 case ARM_VFP_FPINST:
3315 case ARM_VFP_FPINST2:
9ee6e8bb
PB
3316 /* Not present in VFP3. */
3317 if (IS_USER(s)
d614a513 3318 || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
9ee6e8bb 3319 return 1;
d614a513 3320 }
4373f3ce 3321 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 3322 break;
40f137e1 3323 case ARM_VFP_FPSCR:
601d70b9 3324 if (rd == 15) {
4373f3ce
PB
3325 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
3326 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
3327 } else {
7d1b0095 3328 tmp = tcg_temp_new_i32();
4373f3ce
PB
3329 gen_helper_vfp_get_fpscr(tmp, cpu_env);
3330 }
b7bcbe95 3331 break;
a50c0f51 3332 case ARM_VFP_MVFR2:
d614a513 3333 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
a50c0f51
PM
3334 return 1;
3335 }
3336 /* fall through */
9ee6e8bb
PB
3337 case ARM_VFP_MVFR0:
3338 case ARM_VFP_MVFR1:
3339 if (IS_USER(s)
d614a513 3340 || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
9ee6e8bb 3341 return 1;
d614a513 3342 }
4373f3ce 3343 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 3344 break;
b7bcbe95
FB
3345 default:
3346 return 1;
3347 }
3348 } else {
3349 gen_mov_F0_vreg(0, rn);
4373f3ce 3350 tmp = gen_vfp_mrs();
b7bcbe95
FB
3351 }
3352 if (rd == 15) {
b5ff1b31 3353 /* Set the 4 flag bits in the CPSR. */
4373f3ce 3354 gen_set_nzcv(tmp);
7d1b0095 3355 tcg_temp_free_i32(tmp);
4373f3ce
PB
3356 } else {
3357 store_reg(s, rd, tmp);
3358 }
b7bcbe95
FB
3359 } else {
3360 /* arm->vfp */
b7bcbe95 3361 if (insn & (1 << 21)) {
40f137e1 3362 rn >>= 1;
b7bcbe95
FB
3363 /* system register */
3364 switch (rn) {
40f137e1 3365 case ARM_VFP_FPSID:
9ee6e8bb
PB
3366 case ARM_VFP_MVFR0:
3367 case ARM_VFP_MVFR1:
b7bcbe95
FB
3368 /* Writes are ignored. */
3369 break;
40f137e1 3370 case ARM_VFP_FPSCR:
e4c1cfa5 3371 tmp = load_reg(s, rd);
4373f3ce 3372 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 3373 tcg_temp_free_i32(tmp);
b5ff1b31 3374 gen_lookup_tb(s);
b7bcbe95 3375 break;
40f137e1 3376 case ARM_VFP_FPEXC:
9ee6e8bb
PB
3377 if (IS_USER(s))
3378 return 1;
71b3c3de
JR
3379 /* TODO: VFP subarchitecture support.
3380 * For now, keep the EN bit only */
e4c1cfa5 3381 tmp = load_reg(s, rd);
71b3c3de 3382 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 3383 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
3384 gen_lookup_tb(s);
3385 break;
3386 case ARM_VFP_FPINST:
3387 case ARM_VFP_FPINST2:
23adb861
PM
3388 if (IS_USER(s)) {
3389 return 1;
3390 }
e4c1cfa5 3391 tmp = load_reg(s, rd);
4373f3ce 3392 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 3393 break;
b7bcbe95
FB
3394 default:
3395 return 1;
3396 }
3397 } else {
e4c1cfa5 3398 tmp = load_reg(s, rd);
4373f3ce 3399 gen_vfp_msr(tmp);
b7bcbe95
FB
3400 gen_mov_vreg_F0(0, rn);
3401 }
3402 }
3403 }
3404 } else {
3405 /* data processing */
3406 /* The opcode is in bits 23, 21, 20 and 6. */
3407 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
3408 if (dp) {
3409 if (op == 15) {
3410 /* rn is opcode */
3411 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
3412 } else {
3413 /* rn is register number */
9ee6e8bb 3414 VFP_DREG_N(rn, insn);
b7bcbe95
FB
3415 }
3416
239c20c7
WN
3417 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
3418 ((rn & 0x1e) == 0x6))) {
3419 /* Integer or single/half precision destination. */
9ee6e8bb 3420 rd = VFP_SREG_D(insn);
b7bcbe95 3421 } else {
9ee6e8bb 3422 VFP_DREG_D(rd, insn);
b7bcbe95 3423 }
04595bf6 3424 if (op == 15 &&
239c20c7
WN
3425 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
3426 ((rn & 0x1e) == 0x4))) {
3427 /* VCVT from int or half precision is always from S reg
3428 * regardless of dp bit. VCVT with immediate frac_bits
3429 * has same format as SREG_M.
04595bf6
PM
3430 */
3431 rm = VFP_SREG_M(insn);
b7bcbe95 3432 } else {
9ee6e8bb 3433 VFP_DREG_M(rm, insn);
b7bcbe95
FB
3434 }
3435 } else {
9ee6e8bb 3436 rn = VFP_SREG_N(insn);
b7bcbe95
FB
3437 if (op == 15 && rn == 15) {
3438 /* Double precision destination. */
9ee6e8bb
PB
3439 VFP_DREG_D(rd, insn);
3440 } else {
3441 rd = VFP_SREG_D(insn);
3442 }
04595bf6
PM
3443 /* NB that we implicitly rely on the encoding for the frac_bits
3444 * in VCVT of fixed to float being the same as that of an SREG_M
3445 */
9ee6e8bb 3446 rm = VFP_SREG_M(insn);
b7bcbe95
FB
3447 }
3448
69d1fc22 3449 veclen = s->vec_len;
b7bcbe95
FB
3450 if (op == 15 && rn > 3)
3451 veclen = 0;
3452
3453 /* Shut up compiler warnings. */
3454 delta_m = 0;
3455 delta_d = 0;
3456 bank_mask = 0;
3b46e624 3457
b7bcbe95
FB
3458 if (veclen > 0) {
3459 if (dp)
3460 bank_mask = 0xc;
3461 else
3462 bank_mask = 0x18;
3463
3464 /* Figure out what type of vector operation this is. */
3465 if ((rd & bank_mask) == 0) {
3466 /* scalar */
3467 veclen = 0;
3468 } else {
3469 if (dp)
69d1fc22 3470 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 3471 else
69d1fc22 3472 delta_d = s->vec_stride + 1;
b7bcbe95
FB
3473
3474 if ((rm & bank_mask) == 0) {
3475 /* mixed scalar/vector */
3476 delta_m = 0;
3477 } else {
3478 /* vector */
3479 delta_m = delta_d;
3480 }
3481 }
3482 }
3483
3484 /* Load the initial operands. */
3485 if (op == 15) {
3486 switch (rn) {
3487 case 16:
3488 case 17:
3489 /* Integer source */
3490 gen_mov_F0_vreg(0, rm);
3491 break;
3492 case 8:
3493 case 9:
3494 /* Compare */
3495 gen_mov_F0_vreg(dp, rd);
3496 gen_mov_F1_vreg(dp, rm);
3497 break;
3498 case 10:
3499 case 11:
3500 /* Compare with zero */
3501 gen_mov_F0_vreg(dp, rd);
3502 gen_vfp_F1_ld0(dp);
3503 break;
9ee6e8bb
PB
3504 case 20:
3505 case 21:
3506 case 22:
3507 case 23:
644ad806
PB
3508 case 28:
3509 case 29:
3510 case 30:
3511 case 31:
9ee6e8bb
PB
3512 /* Source and destination the same. */
3513 gen_mov_F0_vreg(dp, rd);
3514 break;
6e0c0ed1
PM
3515 case 4:
3516 case 5:
3517 case 6:
3518 case 7:
239c20c7
WN
3519 /* VCVTB, VCVTT: only present with the halfprec extension
3520 * UNPREDICTABLE if bit 8 is set prior to ARMv8
3521 * (we choose to UNDEF)
6e0c0ed1 3522 */
d614a513
PM
3523 if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
3524 !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
6e0c0ed1
PM
3525 return 1;
3526 }
239c20c7
WN
3527 if (!extract32(rn, 1, 1)) {
3528 /* Half precision source. */
3529 gen_mov_F0_vreg(0, rm);
3530 break;
3531 }
6e0c0ed1 3532 /* Otherwise fall through */
b7bcbe95
FB
3533 default:
3534 /* One source operand. */
3535 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3536 break;
b7bcbe95
FB
3537 }
3538 } else {
3539 /* Two source operands. */
3540 gen_mov_F0_vreg(dp, rn);
3541 gen_mov_F1_vreg(dp, rm);
3542 }
3543
3544 for (;;) {
3545 /* Perform the calculation. */
3546 switch (op) {
605a6aed
PM
3547 case 0: /* VMLA: fd + (fn * fm) */
3548 /* Note that order of inputs to the add matters for NaNs */
3549 gen_vfp_F1_mul(dp);
3550 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3551 gen_vfp_add(dp);
3552 break;
605a6aed 3553 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 3554 gen_vfp_mul(dp);
605a6aed
PM
3555 gen_vfp_F1_neg(dp);
3556 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3557 gen_vfp_add(dp);
3558 break;
605a6aed
PM
3559 case 2: /* VNMLS: -fd + (fn * fm) */
3560 /* Note that it isn't valid to replace (-A + B) with (B - A)
3561 * or similar plausible looking simplifications
3562 * because this will give wrong results for NaNs.
3563 */
3564 gen_vfp_F1_mul(dp);
3565 gen_mov_F0_vreg(dp, rd);
3566 gen_vfp_neg(dp);
3567 gen_vfp_add(dp);
b7bcbe95 3568 break;
605a6aed 3569 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3570 gen_vfp_mul(dp);
605a6aed
PM
3571 gen_vfp_F1_neg(dp);
3572 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3573 gen_vfp_neg(dp);
605a6aed 3574 gen_vfp_add(dp);
b7bcbe95
FB
3575 break;
3576 case 4: /* mul: fn * fm */
3577 gen_vfp_mul(dp);
3578 break;
3579 case 5: /* nmul: -(fn * fm) */
3580 gen_vfp_mul(dp);
3581 gen_vfp_neg(dp);
3582 break;
3583 case 6: /* add: fn + fm */
3584 gen_vfp_add(dp);
3585 break;
3586 case 7: /* sub: fn - fm */
3587 gen_vfp_sub(dp);
3588 break;
3589 case 8: /* div: fn / fm */
3590 gen_vfp_div(dp);
3591 break;
da97f52c
PM
3592 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3593 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3594 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3595 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3596 /* These are fused multiply-add, and must be done as one
3597 * floating point operation with no rounding between the
3598 * multiplication and addition steps.
3599 * NB that doing the negations here as separate steps is
3600 * correct : an input NaN should come out with its sign bit
3601 * flipped if it is a negated-input.
3602 */
d614a513 3603 if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
da97f52c
PM
3604 return 1;
3605 }
3606 if (dp) {
3607 TCGv_ptr fpst;
3608 TCGv_i64 frd;
3609 if (op & 1) {
3610 /* VFNMS, VFMS */
3611 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3612 }
3613 frd = tcg_temp_new_i64();
3614 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3615 if (op & 2) {
3616 /* VFNMA, VFNMS */
3617 gen_helper_vfp_negd(frd, frd);
3618 }
3619 fpst = get_fpstatus_ptr(0);
3620 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3621 cpu_F1d, frd, fpst);
3622 tcg_temp_free_ptr(fpst);
3623 tcg_temp_free_i64(frd);
3624 } else {
3625 TCGv_ptr fpst;
3626 TCGv_i32 frd;
3627 if (op & 1) {
3628 /* VFNMS, VFMS */
3629 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3630 }
3631 frd = tcg_temp_new_i32();
3632 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3633 if (op & 2) {
3634 gen_helper_vfp_negs(frd, frd);
3635 }
3636 fpst = get_fpstatus_ptr(0);
3637 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3638 cpu_F1s, frd, fpst);
3639 tcg_temp_free_ptr(fpst);
3640 tcg_temp_free_i32(frd);
3641 }
3642 break;
9ee6e8bb 3643 case 14: /* fconst */
d614a513
PM
3644 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3645 return 1;
3646 }
9ee6e8bb
PB
3647
3648 n = (insn << 12) & 0x80000000;
3649 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3650 if (dp) {
3651 if (i & 0x40)
3652 i |= 0x3f80;
3653 else
3654 i |= 0x4000;
3655 n |= i << 16;
4373f3ce 3656 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3657 } else {
3658 if (i & 0x40)
3659 i |= 0x780;
3660 else
3661 i |= 0x800;
3662 n |= i << 19;
5b340b51 3663 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3664 }
9ee6e8bb 3665 break;
b7bcbe95
FB
3666 case 15: /* extension space */
3667 switch (rn) {
3668 case 0: /* cpy */
3669 /* no-op */
3670 break;
3671 case 1: /* abs */
3672 gen_vfp_abs(dp);
3673 break;
3674 case 2: /* neg */
3675 gen_vfp_neg(dp);
3676 break;
3677 case 3: /* sqrt */
3678 gen_vfp_sqrt(dp);
3679 break;
239c20c7 3680 case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
60011498
PB
3681 tmp = gen_vfp_mrs();
3682 tcg_gen_ext16u_i32(tmp, tmp);
239c20c7
WN
3683 if (dp) {
3684 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3685 cpu_env);
3686 } else {
3687 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3688 cpu_env);
3689 }
7d1b0095 3690 tcg_temp_free_i32(tmp);
60011498 3691 break;
239c20c7 3692 case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
60011498
PB
3693 tmp = gen_vfp_mrs();
3694 tcg_gen_shri_i32(tmp, tmp, 16);
239c20c7
WN
3695 if (dp) {
3696 gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
3697 cpu_env);
3698 } else {
3699 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
3700 cpu_env);
3701 }
7d1b0095 3702 tcg_temp_free_i32(tmp);
60011498 3703 break;
239c20c7 3704 case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
7d1b0095 3705 tmp = tcg_temp_new_i32();
239c20c7
WN
3706 if (dp) {
3707 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3708 cpu_env);
3709 } else {
3710 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3711 cpu_env);
3712 }
60011498
PB
3713 gen_mov_F0_vreg(0, rd);
3714 tmp2 = gen_vfp_mrs();
3715 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3716 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3717 tcg_temp_free_i32(tmp2);
60011498
PB
3718 gen_vfp_msr(tmp);
3719 break;
239c20c7 3720 case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
7d1b0095 3721 tmp = tcg_temp_new_i32();
239c20c7
WN
3722 if (dp) {
3723 gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
3724 cpu_env);
3725 } else {
3726 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
3727 cpu_env);
3728 }
60011498
PB
3729 tcg_gen_shli_i32(tmp, tmp, 16);
3730 gen_mov_F0_vreg(0, rd);
3731 tmp2 = gen_vfp_mrs();
3732 tcg_gen_ext16u_i32(tmp2, tmp2);
3733 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3734 tcg_temp_free_i32(tmp2);
60011498
PB
3735 gen_vfp_msr(tmp);
3736 break;
b7bcbe95
FB
3737 case 8: /* cmp */
3738 gen_vfp_cmp(dp);
3739 break;
3740 case 9: /* cmpe */
3741 gen_vfp_cmpe(dp);
3742 break;
3743 case 10: /* cmpz */
3744 gen_vfp_cmp(dp);
3745 break;
3746 case 11: /* cmpez */
3747 gen_vfp_F1_ld0(dp);
3748 gen_vfp_cmpe(dp);
3749 break;
664c6733
WN
3750 case 12: /* vrintr */
3751 {
3752 TCGv_ptr fpst = get_fpstatus_ptr(0);
3753 if (dp) {
3754 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3755 } else {
3756 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3757 }
3758 tcg_temp_free_ptr(fpst);
3759 break;
3760 }
a290c62a
WN
3761 case 13: /* vrintz */
3762 {
3763 TCGv_ptr fpst = get_fpstatus_ptr(0);
3764 TCGv_i32 tcg_rmode;
3765 tcg_rmode = tcg_const_i32(float_round_to_zero);
3766 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3767 if (dp) {
3768 gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
3769 } else {
3770 gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
3771 }
3772 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
3773 tcg_temp_free_i32(tcg_rmode);
3774 tcg_temp_free_ptr(fpst);
3775 break;
3776 }
4e82bc01
WN
3777 case 14: /* vrintx */
3778 {
3779 TCGv_ptr fpst = get_fpstatus_ptr(0);
3780 if (dp) {
3781 gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
3782 } else {
3783 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
3784 }
3785 tcg_temp_free_ptr(fpst);
3786 break;
3787 }
b7bcbe95
FB
3788 case 15: /* single<->double conversion */
3789 if (dp)
4373f3ce 3790 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3791 else
4373f3ce 3792 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3793 break;
3794 case 16: /* fuito */
5500b06c 3795 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3796 break;
3797 case 17: /* fsito */
5500b06c 3798 gen_vfp_sito(dp, 0);
b7bcbe95 3799 break;
9ee6e8bb 3800 case 20: /* fshto */
d614a513
PM
3801 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3802 return 1;
3803 }
5500b06c 3804 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3805 break;
3806 case 21: /* fslto */
d614a513
PM
3807 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3808 return 1;
3809 }
5500b06c 3810 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3811 break;
3812 case 22: /* fuhto */
d614a513
PM
3813 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3814 return 1;
3815 }
5500b06c 3816 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3817 break;
3818 case 23: /* fulto */
d614a513
PM
3819 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3820 return 1;
3821 }
5500b06c 3822 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3823 break;
b7bcbe95 3824 case 24: /* ftoui */
5500b06c 3825 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3826 break;
3827 case 25: /* ftouiz */
5500b06c 3828 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3829 break;
3830 case 26: /* ftosi */
5500b06c 3831 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3832 break;
3833 case 27: /* ftosiz */
5500b06c 3834 gen_vfp_tosiz(dp, 0);
b7bcbe95 3835 break;
9ee6e8bb 3836 case 28: /* ftosh */
d614a513
PM
3837 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3838 return 1;
3839 }
5500b06c 3840 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3841 break;
3842 case 29: /* ftosl */
d614a513
PM
3843 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3844 return 1;
3845 }
5500b06c 3846 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3847 break;
3848 case 30: /* ftouh */
d614a513
PM
3849 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3850 return 1;
3851 }
5500b06c 3852 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3853 break;
3854 case 31: /* ftoul */
d614a513
PM
3855 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
3856 return 1;
3857 }
5500b06c 3858 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3859 break;
b7bcbe95 3860 default: /* undefined */
b7bcbe95
FB
3861 return 1;
3862 }
3863 break;
3864 default: /* undefined */
b7bcbe95
FB
3865 return 1;
3866 }
3867
3868 /* Write back the result. */
239c20c7
WN
3869 if (op == 15 && (rn >= 8 && rn <= 11)) {
3870 /* Comparison, do nothing. */
3871 } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
3872 (rn & 0x1e) == 0x6)) {
3873 /* VCVT double to int: always integer result.
3874 * VCVT double to half precision is always a single
3875 * precision result.
3876 */
b7bcbe95 3877 gen_mov_vreg_F0(0, rd);
239c20c7 3878 } else if (op == 15 && rn == 15) {
b7bcbe95
FB
3879 /* conversion */
3880 gen_mov_vreg_F0(!dp, rd);
239c20c7 3881 } else {
b7bcbe95 3882 gen_mov_vreg_F0(dp, rd);
239c20c7 3883 }
b7bcbe95
FB
3884
3885 /* break out of the loop if we have finished */
3886 if (veclen == 0)
3887 break;
3888
3889 if (op == 15 && delta_m == 0) {
3890 /* single source one-many */
3891 while (veclen--) {
3892 rd = ((rd + delta_d) & (bank_mask - 1))
3893 | (rd & bank_mask);
3894 gen_mov_vreg_F0(dp, rd);
3895 }
3896 break;
3897 }
3898 /* Setup the next operands. */
3899 veclen--;
3900 rd = ((rd + delta_d) & (bank_mask - 1))
3901 | (rd & bank_mask);
3902
3903 if (op == 15) {
3904 /* One source operand. */
3905 rm = ((rm + delta_m) & (bank_mask - 1))
3906 | (rm & bank_mask);
3907 gen_mov_F0_vreg(dp, rm);
3908 } else {
3909 /* Two source operands. */
3910 rn = ((rn + delta_d) & (bank_mask - 1))
3911 | (rn & bank_mask);
3912 gen_mov_F0_vreg(dp, rn);
3913 if (delta_m) {
3914 rm = ((rm + delta_m) & (bank_mask - 1))
3915 | (rm & bank_mask);
3916 gen_mov_F1_vreg(dp, rm);
3917 }
3918 }
3919 }
3920 }
3921 break;
3922 case 0xc:
3923 case 0xd:
8387da81 3924 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3925 /* two-register transfer */
3926 rn = (insn >> 16) & 0xf;
3927 rd = (insn >> 12) & 0xf;
3928 if (dp) {
9ee6e8bb
PB
3929 VFP_DREG_M(rm, insn);
3930 } else {
3931 rm = VFP_SREG_M(insn);
3932 }
b7bcbe95 3933
18c9b560 3934 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3935 /* vfp->arm */
3936 if (dp) {
4373f3ce
PB
3937 gen_mov_F0_vreg(0, rm * 2);
3938 tmp = gen_vfp_mrs();
3939 store_reg(s, rd, tmp);
3940 gen_mov_F0_vreg(0, rm * 2 + 1);
3941 tmp = gen_vfp_mrs();
3942 store_reg(s, rn, tmp);
b7bcbe95
FB
3943 } else {
3944 gen_mov_F0_vreg(0, rm);
4373f3ce 3945 tmp = gen_vfp_mrs();
8387da81 3946 store_reg(s, rd, tmp);
b7bcbe95 3947 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3948 tmp = gen_vfp_mrs();
8387da81 3949 store_reg(s, rn, tmp);
b7bcbe95
FB
3950 }
3951 } else {
3952 /* arm->vfp */
3953 if (dp) {
4373f3ce
PB
3954 tmp = load_reg(s, rd);
3955 gen_vfp_msr(tmp);
3956 gen_mov_vreg_F0(0, rm * 2);
3957 tmp = load_reg(s, rn);
3958 gen_vfp_msr(tmp);
3959 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3960 } else {
8387da81 3961 tmp = load_reg(s, rd);
4373f3ce 3962 gen_vfp_msr(tmp);
b7bcbe95 3963 gen_mov_vreg_F0(0, rm);
8387da81 3964 tmp = load_reg(s, rn);
4373f3ce 3965 gen_vfp_msr(tmp);
b7bcbe95
FB
3966 gen_mov_vreg_F0(0, rm + 1);
3967 }
3968 }
3969 } else {
3970 /* Load/store */
3971 rn = (insn >> 16) & 0xf;
3972 if (dp)
9ee6e8bb 3973 VFP_DREG_D(rd, insn);
b7bcbe95 3974 else
9ee6e8bb 3975 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3976 if ((insn & 0x01200000) == 0x01000000) {
3977 /* Single load/store */
3978 offset = (insn & 0xff) << 2;
3979 if ((insn & (1 << 23)) == 0)
3980 offset = -offset;
934814f1
PM
3981 if (s->thumb && rn == 15) {
3982 /* This is actually UNPREDICTABLE */
3983 addr = tcg_temp_new_i32();
3984 tcg_gen_movi_i32(addr, s->pc & ~2);
3985 } else {
3986 addr = load_reg(s, rn);
3987 }
312eea9f 3988 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3989 if (insn & (1 << 20)) {
312eea9f 3990 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3991 gen_mov_vreg_F0(dp, rd);
3992 } else {
3993 gen_mov_F0_vreg(dp, rd);
312eea9f 3994 gen_vfp_st(s, dp, addr);
b7bcbe95 3995 }
7d1b0095 3996 tcg_temp_free_i32(addr);
b7bcbe95
FB
3997 } else {
3998 /* load/store multiple */
934814f1 3999 int w = insn & (1 << 21);
b7bcbe95
FB
4000 if (dp)
4001 n = (insn >> 1) & 0x7f;
4002 else
4003 n = insn & 0xff;
4004
934814f1
PM
4005 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
4006 /* P == U , W == 1 => UNDEF */
4007 return 1;
4008 }
4009 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
4010 /* UNPREDICTABLE cases for bad immediates: we choose to
4011 * UNDEF to avoid generating huge numbers of TCG ops
4012 */
4013 return 1;
4014 }
4015 if (rn == 15 && w) {
4016 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
4017 return 1;
4018 }
4019
4020 if (s->thumb && rn == 15) {
4021 /* This is actually UNPREDICTABLE */
4022 addr = tcg_temp_new_i32();
4023 tcg_gen_movi_i32(addr, s->pc & ~2);
4024 } else {
4025 addr = load_reg(s, rn);
4026 }
b7bcbe95 4027 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 4028 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
4029
4030 if (dp)
4031 offset = 8;
4032 else
4033 offset = 4;
4034 for (i = 0; i < n; i++) {
18c9b560 4035 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 4036 /* load */
312eea9f 4037 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
4038 gen_mov_vreg_F0(dp, rd + i);
4039 } else {
4040 /* store */
4041 gen_mov_F0_vreg(dp, rd + i);
312eea9f 4042 gen_vfp_st(s, dp, addr);
b7bcbe95 4043 }
312eea9f 4044 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 4045 }
934814f1 4046 if (w) {
b7bcbe95
FB
4047 /* writeback */
4048 if (insn & (1 << 24))
4049 offset = -offset * n;
4050 else if (dp && (insn & 1))
4051 offset = 4;
4052 else
4053 offset = 0;
4054
4055 if (offset != 0)
312eea9f
FN
4056 tcg_gen_addi_i32(addr, addr, offset);
4057 store_reg(s, rn, addr);
4058 } else {
7d1b0095 4059 tcg_temp_free_i32(addr);
b7bcbe95
FB
4060 }
4061 }
4062 }
4063 break;
4064 default:
4065 /* Should never happen. */
4066 return 1;
4067 }
4068 return 0;
4069}
4070
90aa39a1 4071static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
c53be334 4072{
90aa39a1
SF
4073#ifndef CONFIG_USER_ONLY
4074 return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
4075 ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
4076#else
4077 return true;
4078#endif
4079}
6e256c93 4080
90aa39a1
SF
4081static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
4082{
4083 if (use_goto_tb(s, dest)) {
57fec1fe 4084 tcg_gen_goto_tb(n);
eaed129d 4085 gen_set_pc_im(s, dest);
90aa39a1 4086 tcg_gen_exit_tb((uintptr_t)s->tb + n);
6e256c93 4087 } else {
eaed129d 4088 gen_set_pc_im(s, dest);
57fec1fe 4089 tcg_gen_exit_tb(0);
6e256c93 4090 }
c53be334
FB
4091}
4092
8aaca4c0
FB
4093static inline void gen_jmp (DisasContext *s, uint32_t dest)
4094{
50225ad0 4095 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 4096 /* An indirect jump so that we still trigger the debug exception. */
5899f386 4097 if (s->thumb)
d9ba4830
PB
4098 dest |= 1;
4099 gen_bx_im(s, dest);
8aaca4c0 4100 } else {
6e256c93 4101 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
4102 s->is_jmp = DISAS_TB_JUMP;
4103 }
4104}
4105
39d5492a 4106static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 4107{
ee097184 4108 if (x)
d9ba4830 4109 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 4110 else
d9ba4830 4111 gen_sxth(t0);
ee097184 4112 if (y)
d9ba4830 4113 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 4114 else
d9ba4830
PB
4115 gen_sxth(t1);
4116 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
4117}
4118
4119/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
4120static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
4121{
b5ff1b31
FB
4122 uint32_t mask;
4123
4124 mask = 0;
4125 if (flags & (1 << 0))
4126 mask |= 0xff;
4127 if (flags & (1 << 1))
4128 mask |= 0xff00;
4129 if (flags & (1 << 2))
4130 mask |= 0xff0000;
4131 if (flags & (1 << 3))
4132 mask |= 0xff000000;
9ee6e8bb 4133
2ae23e75 4134 /* Mask out undefined bits. */
9ee6e8bb 4135 mask &= ~CPSR_RESERVED;
d614a513 4136 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4137 mask &= ~CPSR_T;
d614a513
PM
4138 }
4139 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4140 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4141 }
4142 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4143 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4144 }
4145 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4146 mask &= ~CPSR_IT;
d614a513 4147 }
4051e12c
PM
4148 /* Mask out execution state and reserved bits. */
4149 if (!spsr) {
4150 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4151 }
b5ff1b31
FB
4152 /* Mask out privileged bits. */
4153 if (IS_USER(s))
9ee6e8bb 4154 mask &= CPSR_USER;
b5ff1b31
FB
4155 return mask;
4156}
4157
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* NOTE: load_cpu_field(spsr)/store_cpu_field(..., spsr) name the
         * CPUARMState "spsr" field inside the macro's offsetof(), not this
         * function's integer "spsr" parameter — do not rename the parameter
         * expecting it to change the field accessed.
         */
        tmp = load_cpu_field(spsr);
        /* Merge: keep SPSR bits outside "mask", take t0's bits inside it. */
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}
4179
2fbac54b
FN
4180/* Returns nonzero if access to the PSR is not permitted. */
4181static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4182{
39d5492a 4183 TCGv_i32 tmp;
7d1b0095 4184 tmp = tcg_temp_new_i32();
2fbac54b
FN
4185 tcg_gen_movi_i32(tmp, val);
4186 return gen_set_psr(s, mask, spsr, tmp);
4187}
4188
8bfd0550
PM
4189static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
4190 int *tgtmode, int *regno)
4191{
4192 /* Decode the r and sysm fields of MSR/MRS banked accesses into
4193 * the target mode and register number, and identify the various
4194 * unpredictable cases.
4195 * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
4196 * + executed in user mode
4197 * + using R15 as the src/dest register
4198 * + accessing an unimplemented register
4199 * + accessing a register that's inaccessible at current PL/security state*
4200 * + accessing a register that you could access with a different insn
4201 * We choose to UNDEF in all these cases.
4202 * Since we don't know which of the various AArch32 modes we are in
4203 * we have to defer some checks to runtime.
4204 * Accesses to Monitor mode registers from Secure EL1 (which implies
4205 * that EL3 is AArch64) must trap to EL3.
4206 *
4207 * If the access checks fail this function will emit code to take
4208 * an exception and return false. Otherwise it will return true,
4209 * and set *tgtmode and *regno appropriately.
4210 */
4211 int exc_target = default_exception_el(s);
4212
4213 /* These instructions are present only in ARMv8, or in ARMv7 with the
4214 * Virtualization Extensions.
4215 */
4216 if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
4217 !arm_dc_feature(s, ARM_FEATURE_EL2)) {
4218 goto undef;
4219 }
4220
4221 if (IS_USER(s) || rn == 15) {
4222 goto undef;
4223 }
4224
4225 /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
4226 * of registers into (r, sysm).
4227 */
4228 if (r) {
4229 /* SPSRs for other modes */
4230 switch (sysm) {
4231 case 0xe: /* SPSR_fiq */
4232 *tgtmode = ARM_CPU_MODE_FIQ;
4233 break;
4234 case 0x10: /* SPSR_irq */
4235 *tgtmode = ARM_CPU_MODE_IRQ;
4236 break;
4237 case 0x12: /* SPSR_svc */
4238 *tgtmode = ARM_CPU_MODE_SVC;
4239 break;
4240 case 0x14: /* SPSR_abt */
4241 *tgtmode = ARM_CPU_MODE_ABT;
4242 break;
4243 case 0x16: /* SPSR_und */
4244 *tgtmode = ARM_CPU_MODE_UND;
4245 break;
4246 case 0x1c: /* SPSR_mon */
4247 *tgtmode = ARM_CPU_MODE_MON;
4248 break;
4249 case 0x1e: /* SPSR_hyp */
4250 *tgtmode = ARM_CPU_MODE_HYP;
4251 break;
4252 default: /* unallocated */
4253 goto undef;
4254 }
4255 /* We arbitrarily assign SPSR a register number of 16. */
4256 *regno = 16;
4257 } else {
4258 /* general purpose registers for other modes */
4259 switch (sysm) {
4260 case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
4261 *tgtmode = ARM_CPU_MODE_USR;
4262 *regno = sysm + 8;
4263 break;
4264 case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
4265 *tgtmode = ARM_CPU_MODE_FIQ;
4266 *regno = sysm;
4267 break;
4268 case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
4269 *tgtmode = ARM_CPU_MODE_IRQ;
4270 *regno = sysm & 1 ? 13 : 14;
4271 break;
4272 case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
4273 *tgtmode = ARM_CPU_MODE_SVC;
4274 *regno = sysm & 1 ? 13 : 14;
4275 break;
4276 case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
4277 *tgtmode = ARM_CPU_MODE_ABT;
4278 *regno = sysm & 1 ? 13 : 14;
4279 break;
4280 case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
4281 *tgtmode = ARM_CPU_MODE_UND;
4282 *regno = sysm & 1 ? 13 : 14;
4283 break;
4284 case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
4285 *tgtmode = ARM_CPU_MODE_MON;
4286 *regno = sysm & 1 ? 13 : 14;
4287 break;
4288 case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
4289 *tgtmode = ARM_CPU_MODE_HYP;
4290 /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
4291 *regno = sysm & 1 ? 13 : 17;
4292 break;
4293 default: /* unallocated */
4294 goto undef;
4295 }
4296 }
4297
4298 /* Catch the 'accessing inaccessible register' cases we can detect
4299 * at translate time.
4300 */
4301 switch (*tgtmode) {
4302 case ARM_CPU_MODE_MON:
4303 if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
4304 goto undef;
4305 }
4306 if (s->current_el == 1) {
4307 /* If we're in Secure EL1 (which implies that EL3 is AArch64)
4308 * then accesses to Mon registers trap to EL3
4309 */
4310 exc_target = 3;
4311 goto undef;
4312 }
4313 break;
4314 case ARM_CPU_MODE_HYP:
4315 /* Note that we can forbid accesses from EL2 here because they
4316 * must be from Hyp mode itself
4317 */
4318 if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
4319 goto undef;
4320 }
4321 break;
4322 default:
4323 break;
4324 }
4325
4326 return true;
4327
4328undef:
4329 /* If we get here then some access check did not pass */
4330 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
4331 return false;
4332}
4333
4334static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
4335{
4336 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4337 int tgtmode = 0, regno = 0;
4338
4339 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4340 return;
4341 }
4342
4343 /* Sync state because msr_banked() can raise exceptions */
4344 gen_set_condexec(s);
4345 gen_set_pc_im(s, s->pc - 4);
4346 tcg_reg = load_reg(s, rn);
4347 tcg_tgtmode = tcg_const_i32(tgtmode);
4348 tcg_regno = tcg_const_i32(regno);
4349 gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
4350 tcg_temp_free_i32(tcg_tgtmode);
4351 tcg_temp_free_i32(tcg_regno);
4352 tcg_temp_free_i32(tcg_reg);
4353 s->is_jmp = DISAS_UPDATE;
4354}
4355
4356static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
4357{
4358 TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
4359 int tgtmode = 0, regno = 0;
4360
4361 if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
4362 return;
4363 }
4364
4365 /* Sync state because mrs_banked() can raise exceptions */
4366 gen_set_condexec(s);
4367 gen_set_pc_im(s, s->pc - 4);
4368 tcg_reg = tcg_temp_new_i32();
4369 tcg_tgtmode = tcg_const_i32(tgtmode);
4370 tcg_regno = tcg_const_i32(regno);
4371 gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
4372 tcg_temp_free_i32(tcg_tgtmode);
4373 tcg_temp_free_i32(tcg_regno);
4374 store_reg(s, rn, tcg_reg);
4375 s->is_jmp = DISAS_UPDATE;
4376}
4377
fb0e8e79
PM
/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    /* The temp holding the new PC is consumed here. */
    tcg_temp_free_i32(pc);
}
4387
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    s->is_jmp = DISAS_JUMP;
}
3b46e624 4400
fb0e8e79
PM
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    /* NOTE: load_cpu_field(spsr) refers to the CPUARMState "spsr" field
     * by name inside the macro's offsetof(), not to any local variable.
     */
    gen_rfe(s, pc, load_cpu_field(spsr));
}
4406
c22edfeb
AB
4407/*
4408 * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we
4409 * only call the helper when running single threaded TCG code to ensure
4410 * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
4411 * just skip this instruction. Currently the SEV/SEVL instructions
4412 * which are *one* of many ways to wake the CPU from WFE are not
4413 * implemented so we can't sleep like WFI does.
4414 */
9ee6e8bb
PB
4415static void gen_nop_hint(DisasContext *s, int val)
4416{
4417 switch (val) {
c87e5a61 4418 case 1: /* yield */
c22edfeb
AB
4419 if (!parallel_cpus) {
4420 gen_set_pc_im(s, s->pc);
4421 s->is_jmp = DISAS_YIELD;
4422 }
c87e5a61 4423 break;
9ee6e8bb 4424 case 3: /* wfi */
eaed129d 4425 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
4426 s->is_jmp = DISAS_WFI;
4427 break;
4428 case 2: /* wfe */
c22edfeb
AB
4429 if (!parallel_cpus) {
4430 gen_set_pc_im(s, s->pc);
4431 s->is_jmp = DISAS_WFE;
4432 }
72c1d3af 4433 break;
9ee6e8bb 4434 case 4: /* sev */
12b10571
MR
4435 case 5: /* sevl */
4436 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4437 default: /* nop */
4438 break;
4439 }
4440}
99c475ab 4441
ad69471c 4442#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4443
39d5492a 4444static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4445{
4446 switch (size) {
dd8fbd78
FN
4447 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4448 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4449 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4450 default: abort();
9ee6e8bb 4451 }
9ee6e8bb
PB
4452}
4453
39d5492a 4454static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4455{
4456 switch (size) {
dd8fbd78
FN
4457 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4458 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4459 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4460 default: return;
4461 }
4462}
4463
4464/* 32-bit pairwise ops end up the same as the elementwise versions. */
4465#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
4466#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
4467#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
4468#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
4469
ad69471c
PB
/* Invoke the env-taking Neon integer helper for "name", dispatching on
 * (size << 1) | u: cases 0..5 select the s8,u8,s16,u16,s32,u32 variants.
 * Uses tmp/tmp2 from the enclosing scope; any other size/u combination
 * makes the enclosing function "return 1" (treated as UNDEF by callers).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4492
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not need cpu_env.
 * Dispatches on (size << 1) | u; other combinations "return 1" (UNDEF).
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4515
39d5492a 4516static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 4517{
39d5492a 4518 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
4519 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
4520 return tmp;
9ee6e8bb
PB
4521}
4522
/* Spill var to env->vfp.scratch[scratch]; consumes (frees) var. */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4528
39d5492a 4529static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 4530{
39d5492a 4531 TCGv_i32 tmp;
9ee6e8bb 4532 if (size == 1) {
0fad6efc
PM
4533 tmp = neon_load_reg(reg & 7, reg >> 4);
4534 if (reg & 8) {
dd8fbd78 4535 gen_neon_dup_high16(tmp);
0fad6efc
PM
4536 } else {
4537 gen_neon_dup_low16(tmp);
dd8fbd78 4538 }
0fad6efc
PM
4539 } else {
4540 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 4541 }
dd8fbd78 4542 return tmp;
9ee6e8bb
PB
4543}
4544
02acedf9 4545static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 4546{
39d5492a 4547 TCGv_i32 tmp, tmp2;
600b828c 4548 if (!q && size == 2) {
02acedf9
PM
4549 return 1;
4550 }
4551 tmp = tcg_const_i32(rd);
4552 tmp2 = tcg_const_i32(rm);
4553 if (q) {
4554 switch (size) {
4555 case 0:
02da0b2d 4556 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4557 break;
4558 case 1:
02da0b2d 4559 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4560 break;
4561 case 2:
02da0b2d 4562 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
4563 break;
4564 default:
4565 abort();
4566 }
4567 } else {
4568 switch (size) {
4569 case 0:
02da0b2d 4570 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
4571 break;
4572 case 1:
02da0b2d 4573 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
4574 break;
4575 default:
4576 abort();
4577 }
4578 }
4579 tcg_temp_free_i32(tmp);
4580 tcg_temp_free_i32(tmp2);
4581 return 0;
19457615
FN
4582}
4583
d68a6f3a 4584static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 4585{
39d5492a 4586 TCGv_i32 tmp, tmp2;
600b828c 4587 if (!q && size == 2) {
d68a6f3a
PM
4588 return 1;
4589 }
4590 tmp = tcg_const_i32(rd);
4591 tmp2 = tcg_const_i32(rm);
4592 if (q) {
4593 switch (size) {
4594 case 0:
02da0b2d 4595 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4596 break;
4597 case 1:
02da0b2d 4598 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4599 break;
4600 case 2:
02da0b2d 4601 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
4602 break;
4603 default:
4604 abort();
4605 }
4606 } else {
4607 switch (size) {
4608 case 0:
02da0b2d 4609 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
4610 break;
4611 case 1:
02da0b2d 4612 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
4613 break;
4614 default:
4615 abort();
4616 }
4617 }
4618 tcg_temp_free_i32(tmp);
4619 tcg_temp_free_i32(tmp2);
4620 return 0;
19457615
FN
4621}
4622
39d5492a 4623static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 4624{
39d5492a 4625 TCGv_i32 rd, tmp;
19457615 4626
7d1b0095
PM
4627 rd = tcg_temp_new_i32();
4628 tmp = tcg_temp_new_i32();
19457615
FN
4629
4630 tcg_gen_shli_i32(rd, t0, 8);
4631 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
4632 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
4633 tcg_gen_or_i32(rd, rd, tmp);
4634
4635 tcg_gen_shri_i32(t1, t1, 8);
4636 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
4637 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
4638 tcg_gen_or_i32(t1, t1, tmp);
4639 tcg_gen_mov_i32(t0, rd);
4640
7d1b0095
PM
4641 tcg_temp_free_i32(tmp);
4642 tcg_temp_free_i32(rd);
19457615
FN
4643}
4644
39d5492a 4645static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 4646{
39d5492a 4647 TCGv_i32 rd, tmp;
19457615 4648
7d1b0095
PM
4649 rd = tcg_temp_new_i32();
4650 tmp = tcg_temp_new_i32();
19457615
FN
4651
4652 tcg_gen_shli_i32(rd, t0, 16);
4653 tcg_gen_andi_i32(tmp, t1, 0xffff);
4654 tcg_gen_or_i32(rd, rd, tmp);
4655 tcg_gen_shri_i32(t1, t1, 16);
4656 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
4657 tcg_gen_or_i32(t1, t1, tmp);
4658 tcg_gen_mov_i32(t0, rd);
4659
7d1b0095
PM
4660 tcg_temp_free_i32(tmp);
4661 tcg_temp_free_i32(rd);
19457615
FN
4662}
4663
4664
9ee6e8bb
PB
/* Decode table for VLD/VST "multiple structures", indexed by the insn's
 * op field (values 0..10; larger values are rejected by the caller).
 */
static struct {
    int nregs;      /* number of D registers transferred */
    int interleave; /* element interleave factor */
    int spacing;    /* register spacing (1: consecutive, 2: alternate) */
} neon_ls_element_type[11] = {
    {4, 4, 1}, /* op 0 */
    {4, 4, 2}, /* op 1 */
    {4, 1, 1}, /* op 2 */
    {4, 2, 1}, /* op 3 */
    {3, 3, 1}, /* op 4 */
    {3, 3, 2}, /* op 5 */
    {3, 1, 1}, /* op 6 */
    {1, 1, 1}, /* op 7 */
    {2, 2, 1}, /* op 8 */
    {2, 2, 2}, /* op 9 */
    {2, 1, 1}  /* op 10 */
};
4682
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved accesses, restart from the base address and
             * step by the per-register element offset instead.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes cover each D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two halfword accesses assembled into one word. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into one word. */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the selected lane.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(s, tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none; rm == 13 means
     * post-increment by the transfer size; otherwise add register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 5011
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* t and f are overwritten in place; the operation order matters if
     * any operands alias each other, so do not reorder these ops.
     */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
5019
39d5492a 5020static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5021{
5022 switch (size) {
5023 case 0: gen_helper_neon_narrow_u8(dest, src); break;
5024 case 1: gen_helper_neon_narrow_u16(dest, src); break;
ecc7b3aa 5025 case 2: tcg_gen_extrl_i64_i32(dest, src); break;
ad69471c
PB
5026 default: abort();
5027 }
5028}
5029
39d5492a 5030static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5031{
5032 switch (size) {
02da0b2d
PM
5033 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
5034 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
5035 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
5036 default: abort();
5037 }
5038}
5039
39d5492a 5040static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
5041{
5042 switch (size) {
02da0b2d
PM
5043 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
5044 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
5045 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
5046 default: abort();
5047 }
5048}
5049
39d5492a 5050static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
5051{
5052 switch (size) {
02da0b2d
PM
5053 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
5054 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
5055 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
5056 default: abort();
5057 }
5058}
5059
39d5492a 5060static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
5061 int q, int u)
5062{
5063 if (q) {
5064 if (u) {
5065 switch (size) {
5066 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
5067 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
5068 default: abort();
5069 }
5070 } else {
5071 switch (size) {
5072 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
5073 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
5074 default: abort();
5075 }
5076 }
5077 } else {
5078 if (u) {
5079 switch (size) {
b408a9b0
CL
5080 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
5081 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
5082 default: abort();
5083 }
5084 } else {
5085 switch (size) {
5086 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
5087 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
5088 default: abort();
5089 }
5090 }
5091 }
5092}
5093
39d5492a 5094static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
5095{
5096 if (u) {
5097 switch (size) {
5098 case 0: gen_helper_neon_widen_u8(dest, src); break;
5099 case 1: gen_helper_neon_widen_u16(dest, src); break;
5100 case 2: tcg_gen_extu_i32_i64(dest, src); break;
5101 default: abort();
5102 }
5103 } else {
5104 switch (size) {
5105 case 0: gen_helper_neon_widen_s8(dest, src); break;
5106 case 1: gen_helper_neon_widen_s16(dest, src); break;
5107 case 2: tcg_gen_ext_i32_i64(dest, src); break;
5108 default: abort();
5109 }
5110 }
7d1b0095 5111 tcg_temp_free_i32(src);
ad69471c
PB
5112}
5113
5114static inline void gen_neon_addl(int size)
5115{
5116 switch (size) {
5117 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
5118 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
5119 case 2: tcg_gen_add_i64(CPU_V001); break;
5120 default: abort();
5121 }
5122}
5123
5124static inline void gen_neon_subl(int size)
5125{
5126 switch (size) {
5127 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
5128 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
5129 case 2: tcg_gen_sub_i64(CPU_V001); break;
5130 default: abort();
5131 }
5132}
5133
a7812ae4 5134static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
5135{
5136 switch (size) {
5137 case 0: gen_helper_neon_negl_u16(var, var); break;
5138 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
5139 case 2:
5140 tcg_gen_neg_i64(var, var);
5141 break;
ad69471c
PB
5142 default: abort();
5143 }
5144}
5145
a7812ae4 5146static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
5147{
5148 switch (size) {
02da0b2d
PM
5149 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
5150 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
5151 default: abort();
5152 }
5153}
5154
39d5492a
PM
5155static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
5156 int size, int u)
ad69471c 5157{
a7812ae4 5158 TCGv_i64 tmp;
ad69471c
PB
5159
5160 switch ((size << 1) | u) {
5161 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
5162 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
5163 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
5164 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
5165 case 4:
5166 tmp = gen_muls_i64_i32(a, b);
5167 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5168 tcg_temp_free_i64(tmp);
ad69471c
PB
5169 break;
5170 case 5:
5171 tmp = gen_mulu_i64_i32(a, b);
5172 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 5173 tcg_temp_free_i64(tmp);
ad69471c
PB
5174 break;
5175 default: abort();
5176 }
c6067f04
CL
5177
5178 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
5179 Don't forget to clean them now. */
5180 if (size < 2) {
7d1b0095
PM
5181 tcg_temp_free_i32(a);
5182 tcg_temp_free_i32(b);
c6067f04 5183 }
ad69471c
PB
5184}
5185
39d5492a
PM
5186static void gen_neon_narrow_op(int op, int u, int size,
5187 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
5188{
5189 if (op) {
5190 if (u) {
5191 gen_neon_unarrow_sats(size, dest, src);
5192 } else {
5193 gen_neon_narrow(size, dest, src);
5194 }
5195 } else {
5196 if (u) {
5197 gen_neon_narrow_satu(size, dest, src);
5198 } else {
5199 gen_neon_narrow_sats(size, dest, src);
5200 }
5201 }
5202}
5203
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
62698be3
PM
5240
5241static const uint8_t neon_3r_sizes[] = {
5242 [NEON_3R_VHADD] = 0x7,
5243 [NEON_3R_VQADD] = 0xf,
5244 [NEON_3R_VRHADD] = 0x7,
5245 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
5246 [NEON_3R_VHSUB] = 0x7,
5247 [NEON_3R_VQSUB] = 0xf,
5248 [NEON_3R_VCGT] = 0x7,
5249 [NEON_3R_VCGE] = 0x7,
5250 [NEON_3R_VSHL] = 0xf,
5251 [NEON_3R_VQSHL] = 0xf,
5252 [NEON_3R_VRSHL] = 0xf,
5253 [NEON_3R_VQRSHL] = 0xf,
5254 [NEON_3R_VMAX] = 0x7,
5255 [NEON_3R_VMIN] = 0x7,
5256 [NEON_3R_VABD] = 0x7,
5257 [NEON_3R_VABA] = 0x7,
5258 [NEON_3R_VADD_VSUB] = 0xf,
5259 [NEON_3R_VTST_VCEQ] = 0x7,
5260 [NEON_3R_VML] = 0x7,
5261 [NEON_3R_VMUL] = 0x7,
5262 [NEON_3R_VPMAX] = 0x7,
5263 [NEON_3R_VPMIN] = 0x7,
5264 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
5265 [NEON_3R_VPADD] = 0x7,
f1ecb913 5266 [NEON_3R_SHA] = 0xf, /* size field encodes op type */
da97f52c 5267 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
5268 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
5269 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
5270 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
5271 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
5272 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
505935fc 5273 [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
62698be3
PM
5274};
5275
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps (e.g. 3, 29) are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
5342
5343static int neon_2rm_is_float_op(int op)
5344{
5345 /* Return true if this neon 2reg-misc op is float-to-float */
5346 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
34f7b0a2 5347 (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
901ad525
WN
5348 op == NEON_2RM_VRINTM ||
5349 (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
34f7b0a2 5350 op >= NEON_2RM_VRECPE_F);
600b828c
PM
5351}
5352
fe8fcf3d
PM
5353static bool neon_2rm_is_v8_op(int op)
5354{
5355 /* Return true if this neon 2reg-misc op is ARMv8 and up */
5356 switch (op) {
5357 case NEON_2RM_VRINTN:
5358 case NEON_2RM_VRINTA:
5359 case NEON_2RM_VRINTM:
5360 case NEON_2RM_VRINTP:
5361 case NEON_2RM_VRINTZ:
5362 case NEON_2RM_VRINTX:
5363 case NEON_2RM_VCVTAU:
5364 case NEON_2RM_VCVTAS:
5365 case NEON_2RM_VCVTNU:
5366 case NEON_2RM_VCVTNS:
5367 case NEON_2RM_VCVTPU:
5368 case NEON_2RM_VCVTPS:
5369 case NEON_2RM_VCVTMU:
5370 case NEON_2RM_VCVTMS:
5371 return true;
5372 default:
5373 return false;
5374 }
5375}
5376
600b828c
PM
5377/* Each entry in this array has bit n set if the insn allows
5378 * size value n (otherwise it will UNDEF). Since unallocated
5379 * op values will have no bits set they always UNDEF.
5380 */
5381static const uint8_t neon_2rm_sizes[] = {
5382 [NEON_2RM_VREV64] = 0x7,
5383 [NEON_2RM_VREV32] = 0x3,
5384 [NEON_2RM_VREV16] = 0x1,
5385 [NEON_2RM_VPADDL] = 0x7,
5386 [NEON_2RM_VPADDL_U] = 0x7,
9d935509
AB
5387 [NEON_2RM_AESE] = 0x1,
5388 [NEON_2RM_AESMC] = 0x1,
600b828c
PM
5389 [NEON_2RM_VCLS] = 0x7,
5390 [NEON_2RM_VCLZ] = 0x7,
5391 [NEON_2RM_VCNT] = 0x1,
5392 [NEON_2RM_VMVN] = 0x1,
5393 [NEON_2RM_VPADAL] = 0x7,
5394 [NEON_2RM_VPADAL_U] = 0x7,
5395 [NEON_2RM_VQABS] = 0x7,
5396 [NEON_2RM_VQNEG] = 0x7,
5397 [NEON_2RM_VCGT0] = 0x7,
5398 [NEON_2RM_VCGE0] = 0x7,
5399 [NEON_2RM_VCEQ0] = 0x7,
5400 [NEON_2RM_VCLE0] = 0x7,
5401 [NEON_2RM_VCLT0] = 0x7,
f1ecb913 5402 [NEON_2RM_SHA1H] = 0x4,
600b828c
PM
5403 [NEON_2RM_VABS] = 0x7,
5404 [NEON_2RM_VNEG] = 0x7,
5405 [NEON_2RM_VCGT0_F] = 0x4,
5406 [NEON_2RM_VCGE0_F] = 0x4,
5407 [NEON_2RM_VCEQ0_F] = 0x4,
5408 [NEON_2RM_VCLE0_F] = 0x4,
5409 [NEON_2RM_VCLT0_F] = 0x4,
5410 [NEON_2RM_VABS_F] = 0x4,
5411 [NEON_2RM_VNEG_F] = 0x4,
5412 [NEON_2RM_VSWP] = 0x1,
5413 [NEON_2RM_VTRN] = 0x7,
5414 [NEON_2RM_VUZP] = 0x7,
5415 [NEON_2RM_VZIP] = 0x7,
5416 [NEON_2RM_VMOVN] = 0x7,
5417 [NEON_2RM_VQMOVN] = 0x7,
5418 [NEON_2RM_VSHLL] = 0x7,
f1ecb913 5419 [NEON_2RM_SHA1SU1] = 0x4,
34f7b0a2 5420 [NEON_2RM_VRINTN] = 0x4,
2ce70625 5421 [NEON_2RM_VRINTX] = 0x4,
34f7b0a2
WN
5422 [NEON_2RM_VRINTA] = 0x4,
5423 [NEON_2RM_VRINTZ] = 0x4,
600b828c 5424 [NEON_2RM_VCVT_F16_F32] = 0x2,
34f7b0a2 5425 [NEON_2RM_VRINTM] = 0x4,
600b828c 5426 [NEON_2RM_VCVT_F32_F16] = 0x2,
34f7b0a2 5427 [NEON_2RM_VRINTP] = 0x4,
901ad525
WN
5428 [NEON_2RM_VCVTAU] = 0x4,
5429 [NEON_2RM_VCVTAS] = 0x4,
5430 [NEON_2RM_VCVTNU] = 0x4,
5431 [NEON_2RM_VCVTNS] = 0x4,
5432 [NEON_2RM_VCVTPU] = 0x4,
5433 [NEON_2RM_VCVTPS] = 0x4,
5434 [NEON_2RM_VCVTMU] = 0x4,
5435 [NEON_2RM_VCVTMS] = 0x4,
600b828c
PM
5436 [NEON_2RM_VRECPE] = 0x4,
5437 [NEON_2RM_VRSQRTE] = 0x4,
5438 [NEON_2RM_VRECPE_F] = 0x4,
5439 [NEON_2RM_VRSQRTE_F] = 0x4,
5440 [NEON_2RM_VCVT_FS] = 0x4,
5441 [NEON_2RM_VCVT_FU] = 0x4,
5442 [NEON_2RM_VCVT_SF] = 0x4,
5443 [NEON_2RM_VCVT_UF] = 0x4,
5444};
5445
9ee6e8bb
PB
5446/* Translate a NEON data processing instruction. Return nonzero if the
5447 instruction is invalid.
ad69471c
PB
5448 We process data in a mixture of 32-bit and 64-bit chunks.
5449 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5450
7dcc1f89 5451static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5452{
5453 int op;
5454 int q;
5455 int rd, rn, rm;
5456 int size;
5457 int shift;
5458 int pass;
5459 int count;
5460 int pairwise;
5461 int u;
ca9a32e4 5462 uint32_t imm, mask;
39d5492a 5463 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5464 TCGv_i64 tmp64;
9ee6e8bb 5465
2c7ffc41
PM
5466 /* FIXME: this access check should not take precedence over UNDEF
5467 * for invalid encodings; we will generate incorrect syndrome information
5468 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5469 */
9dbbc748 5470 if (s->fp_excp_el) {
2c7ffc41 5471 gen_exception_insn(s, 4, EXCP_UDEF,
7d197d2d 5472 syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
2c7ffc41
PM
5473 return 0;
5474 }
5475
5df8bac1 5476 if (!s->vfp_enabled)
9ee6e8bb
PB
5477 return 1;
5478 q = (insn & (1 << 6)) != 0;
5479 u = (insn >> 24) & 1;
5480 VFP_DREG_D(rd, insn);
5481 VFP_DREG_N(rn, insn);
5482 VFP_DREG_M(rm, insn);
5483 size = (insn >> 20) & 3;
5484 if ((insn & (1 << 23)) == 0) {
5485 /* Three register same length. */
5486 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5487 /* Catch invalid op and bad size combinations: UNDEF */
5488 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5489 return 1;
5490 }
25f84f79
PM
5491 /* All insns of this form UNDEF for either this condition or the
5492 * superset of cases "Q==1"; we catch the latter later.
5493 */
5494 if (q && ((rd | rn | rm) & 1)) {
5495 return 1;
5496 }
f1ecb913
AB
5497 /*
5498 * The SHA-1/SHA-256 3-register instructions require special treatment
5499 * here, as their size field is overloaded as an op type selector, and
5500 * they all consume their input in a single pass.
5501 */
5502 if (op == NEON_3R_SHA) {
5503 if (!q) {
5504 return 1;
5505 }
5506 if (!u) { /* SHA-1 */
d614a513 5507 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5508 return 1;
5509 }
5510 tmp = tcg_const_i32(rd);
5511 tmp2 = tcg_const_i32(rn);
5512 tmp3 = tcg_const_i32(rm);
5513 tmp4 = tcg_const_i32(size);
5514 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5515 tcg_temp_free_i32(tmp4);
5516 } else { /* SHA-256 */
d614a513 5517 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5518 return 1;
5519 }
5520 tmp = tcg_const_i32(rd);
5521 tmp2 = tcg_const_i32(rn);
5522 tmp3 = tcg_const_i32(rm);
5523 switch (size) {
5524 case 0:
5525 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5526 break;
5527 case 1:
5528 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5529 break;
5530 case 2:
5531 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5532 break;
5533 }
5534 }
5535 tcg_temp_free_i32(tmp);
5536 tcg_temp_free_i32(tmp2);
5537 tcg_temp_free_i32(tmp3);
5538 return 0;
5539 }
62698be3
PM
5540 if (size == 3 && op != NEON_3R_LOGIC) {
5541 /* 64-bit element instructions. */
9ee6e8bb 5542 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5543 neon_load_reg64(cpu_V0, rn + pass);
5544 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5545 switch (op) {
62698be3 5546 case NEON_3R_VQADD:
9ee6e8bb 5547 if (u) {
02da0b2d
PM
5548 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5549 cpu_V0, cpu_V1);
2c0262af 5550 } else {
02da0b2d
PM
5551 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5552 cpu_V0, cpu_V1);
2c0262af 5553 }
9ee6e8bb 5554 break;
62698be3 5555 case NEON_3R_VQSUB:
9ee6e8bb 5556 if (u) {
02da0b2d
PM
5557 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5558 cpu_V0, cpu_V1);
ad69471c 5559 } else {
02da0b2d
PM
5560 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5561 cpu_V0, cpu_V1);
ad69471c
PB
5562 }
5563 break;
62698be3 5564 case NEON_3R_VSHL:
ad69471c
PB
5565 if (u) {
5566 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5567 } else {
5568 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5569 }
5570 break;
62698be3 5571 case NEON_3R_VQSHL:
ad69471c 5572 if (u) {
02da0b2d
PM
5573 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5574 cpu_V1, cpu_V0);
ad69471c 5575 } else {
02da0b2d
PM
5576 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5577 cpu_V1, cpu_V0);
ad69471c
PB
5578 }
5579 break;
62698be3 5580 case NEON_3R_VRSHL:
ad69471c
PB
5581 if (u) {
5582 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5583 } else {
ad69471c
PB
5584 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5585 }
5586 break;
62698be3 5587 case NEON_3R_VQRSHL:
ad69471c 5588 if (u) {
02da0b2d
PM
5589 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5590 cpu_V1, cpu_V0);
ad69471c 5591 } else {
02da0b2d
PM
5592 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5593 cpu_V1, cpu_V0);
1e8d4eec 5594 }
9ee6e8bb 5595 break;
62698be3 5596 case NEON_3R_VADD_VSUB:
9ee6e8bb 5597 if (u) {
ad69471c 5598 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5599 } else {
ad69471c 5600 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5601 }
5602 break;
5603 default:
5604 abort();
2c0262af 5605 }
ad69471c 5606 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5607 }
9ee6e8bb 5608 return 0;
2c0262af 5609 }
25f84f79 5610 pairwise = 0;
9ee6e8bb 5611 switch (op) {
62698be3
PM
5612 case NEON_3R_VSHL:
5613 case NEON_3R_VQSHL:
5614 case NEON_3R_VRSHL:
5615 case NEON_3R_VQRSHL:
9ee6e8bb 5616 {
ad69471c
PB
5617 int rtmp;
5618 /* Shift instruction operands are reversed. */
5619 rtmp = rn;
9ee6e8bb 5620 rn = rm;
ad69471c 5621 rm = rtmp;
9ee6e8bb 5622 }
2c0262af 5623 break;
25f84f79
PM
5624 case NEON_3R_VPADD:
5625 if (u) {
5626 return 1;
5627 }
5628 /* Fall through */
62698be3
PM
5629 case NEON_3R_VPMAX:
5630 case NEON_3R_VPMIN:
9ee6e8bb 5631 pairwise = 1;
2c0262af 5632 break;
25f84f79
PM
5633 case NEON_3R_FLOAT_ARITH:
5634 pairwise = (u && size < 2); /* if VPADD (float) */
5635 break;
5636 case NEON_3R_FLOAT_MINMAX:
5637 pairwise = u; /* if VPMIN/VPMAX (float) */
5638 break;
5639 case NEON_3R_FLOAT_CMP:
5640 if (!u && size) {
5641 /* no encoding for U=0 C=1x */
5642 return 1;
5643 }
5644 break;
5645 case NEON_3R_FLOAT_ACMP:
5646 if (!u) {
5647 return 1;
5648 }
5649 break;
505935fc
WN
5650 case NEON_3R_FLOAT_MISC:
5651 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5652 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5653 return 1;
5654 }
2c0262af 5655 break;
25f84f79
PM
5656 case NEON_3R_VMUL:
5657 if (u && (size != 0)) {
5658 /* UNDEF on invalid size for polynomial subcase */
5659 return 1;
5660 }
2c0262af 5661 break;
da97f52c 5662 case NEON_3R_VFM:
d614a513 5663 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5664 return 1;
5665 }
5666 break;
9ee6e8bb 5667 default:
2c0262af 5668 break;
9ee6e8bb 5669 }
dd8fbd78 5670
25f84f79
PM
5671 if (pairwise && q) {
5672 /* All the pairwise insns UNDEF if Q is set */
5673 return 1;
5674 }
5675
9ee6e8bb
PB
5676 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5677
5678 if (pairwise) {
5679 /* Pairwise. */
a5a14945
JR
5680 if (pass < 1) {
5681 tmp = neon_load_reg(rn, 0);
5682 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5683 } else {
a5a14945
JR
5684 tmp = neon_load_reg(rm, 0);
5685 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5686 }
5687 } else {
5688 /* Elementwise. */
dd8fbd78
FN
5689 tmp = neon_load_reg(rn, pass);
5690 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5691 }
5692 switch (op) {
62698be3 5693 case NEON_3R_VHADD:
9ee6e8bb
PB
5694 GEN_NEON_INTEGER_OP(hadd);
5695 break;
62698be3 5696 case NEON_3R_VQADD:
02da0b2d 5697 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5698 break;
62698be3 5699 case NEON_3R_VRHADD:
9ee6e8bb 5700 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5701 break;
62698be3 5702 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5703 switch ((u << 2) | size) {
5704 case 0: /* VAND */
dd8fbd78 5705 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5706 break;
5707 case 1: /* BIC */
f669df27 5708 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5709 break;
5710 case 2: /* VORR */
dd8fbd78 5711 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5712 break;
5713 case 3: /* VORN */
f669df27 5714 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5715 break;
5716 case 4: /* VEOR */
dd8fbd78 5717 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5718 break;
5719 case 5: /* VBSL */
dd8fbd78
FN
5720 tmp3 = neon_load_reg(rd, pass);
5721 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5722 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5723 break;
5724 case 6: /* VBIT */
dd8fbd78
FN
5725 tmp3 = neon_load_reg(rd, pass);
5726 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5727 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5728 break;
5729 case 7: /* VBIF */
dd8fbd78
FN
5730 tmp3 = neon_load_reg(rd, pass);
5731 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5732 tcg_temp_free_i32(tmp3);
9ee6e8bb 5733 break;
2c0262af
FB
5734 }
5735 break;
62698be3 5736 case NEON_3R_VHSUB:
9ee6e8bb
PB
5737 GEN_NEON_INTEGER_OP(hsub);
5738 break;
62698be3 5739 case NEON_3R_VQSUB:
02da0b2d 5740 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5741 break;
62698be3 5742 case NEON_3R_VCGT:
9ee6e8bb
PB
5743 GEN_NEON_INTEGER_OP(cgt);
5744 break;
62698be3 5745 case NEON_3R_VCGE:
9ee6e8bb
PB
5746 GEN_NEON_INTEGER_OP(cge);
5747 break;
62698be3 5748 case NEON_3R_VSHL:
ad69471c 5749 GEN_NEON_INTEGER_OP(shl);
2c0262af 5750 break;
62698be3 5751 case NEON_3R_VQSHL:
02da0b2d 5752 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5753 break;
62698be3 5754 case NEON_3R_VRSHL:
ad69471c 5755 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5756 break;
62698be3 5757 case NEON_3R_VQRSHL:
02da0b2d 5758 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5759 break;
62698be3 5760 case NEON_3R_VMAX:
9ee6e8bb
PB
5761 GEN_NEON_INTEGER_OP(max);
5762 break;
62698be3 5763 case NEON_3R_VMIN:
9ee6e8bb
PB
5764 GEN_NEON_INTEGER_OP(min);
5765 break;
62698be3 5766 case NEON_3R_VABD:
9ee6e8bb
PB
5767 GEN_NEON_INTEGER_OP(abd);
5768 break;
62698be3 5769 case NEON_3R_VABA:
9ee6e8bb 5770 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5771 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5772 tmp2 = neon_load_reg(rd, pass);
5773 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5774 break;
62698be3 5775 case NEON_3R_VADD_VSUB:
9ee6e8bb 5776 if (!u) { /* VADD */
62698be3 5777 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5778 } else { /* VSUB */
5779 switch (size) {
dd8fbd78
FN
5780 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5781 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5782 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5783 default: abort();
9ee6e8bb
PB
5784 }
5785 }
5786 break;
62698be3 5787 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5788 if (!u) { /* VTST */
5789 switch (size) {
dd8fbd78
FN
5790 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5791 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5792 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5793 default: abort();
9ee6e8bb
PB
5794 }
5795 } else { /* VCEQ */
5796 switch (size) {
dd8fbd78
FN
5797 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5798 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5799 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5800 default: abort();
9ee6e8bb
PB
5801 }
5802 }
5803 break;
62698be3 5804 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5805 switch (size) {
dd8fbd78
FN
5806 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5807 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5808 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5809 default: abort();
9ee6e8bb 5810 }
7d1b0095 5811 tcg_temp_free_i32(tmp2);
dd8fbd78 5812 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5813 if (u) { /* VMLS */
dd8fbd78 5814 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5815 } else { /* VMLA */
dd8fbd78 5816 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5817 }
5818 break;
62698be3 5819 case NEON_3R_VMUL:
9ee6e8bb 5820 if (u) { /* polynomial */
dd8fbd78 5821 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5822 } else { /* Integer */
5823 switch (size) {
dd8fbd78
FN
5824 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5825 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5826 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5827 default: abort();
9ee6e8bb
PB
5828 }
5829 }
5830 break;
62698be3 5831 case NEON_3R_VPMAX:
9ee6e8bb
PB
5832 GEN_NEON_INTEGER_OP(pmax);
5833 break;
62698be3 5834 case NEON_3R_VPMIN:
9ee6e8bb
PB
5835 GEN_NEON_INTEGER_OP(pmin);
5836 break;
62698be3 5837 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5838 if (!u) { /* VQDMULH */
5839 switch (size) {
02da0b2d
PM
5840 case 1:
5841 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5842 break;
5843 case 2:
5844 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5845 break;
62698be3 5846 default: abort();
9ee6e8bb 5847 }
62698be3 5848 } else { /* VQRDMULH */
9ee6e8bb 5849 switch (size) {
02da0b2d
PM
5850 case 1:
5851 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5852 break;
5853 case 2:
5854 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5855 break;
62698be3 5856 default: abort();
9ee6e8bb
PB
5857 }
5858 }
5859 break;
62698be3 5860 case NEON_3R_VPADD:
9ee6e8bb 5861 switch (size) {
dd8fbd78
FN
5862 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5863 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5864 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5865 default: abort();
9ee6e8bb
PB
5866 }
5867 break;
62698be3 5868 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5869 {
5870 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5871 switch ((u << 2) | size) {
5872 case 0: /* VADD */
aa47cfdd
PM
5873 case 4: /* VPADD */
5874 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5875 break;
5876 case 2: /* VSUB */
aa47cfdd 5877 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5878 break;
5879 case 6: /* VABD */
aa47cfdd 5880 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5881 break;
5882 default:
62698be3 5883 abort();
9ee6e8bb 5884 }
aa47cfdd 5885 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5886 break;
aa47cfdd 5887 }
62698be3 5888 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5889 {
5890 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5891 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5892 if (!u) {
7d1b0095 5893 tcg_temp_free_i32(tmp2);
dd8fbd78 5894 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5895 if (size == 0) {
aa47cfdd 5896 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5897 } else {
aa47cfdd 5898 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5899 }
5900 }
aa47cfdd 5901 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5902 break;
aa47cfdd 5903 }
62698be3 5904 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5905 {
5906 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5907 if (!u) {
aa47cfdd 5908 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5909 } else {
aa47cfdd
PM
5910 if (size == 0) {
5911 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5912 } else {
5913 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5914 }
b5ff1b31 5915 }
aa47cfdd 5916 tcg_temp_free_ptr(fpstatus);
2c0262af 5917 break;
aa47cfdd 5918 }
62698be3 5919 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5920 {
5921 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5922 if (size == 0) {
5923 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5924 } else {
5925 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5926 }
5927 tcg_temp_free_ptr(fpstatus);
2c0262af 5928 break;
aa47cfdd 5929 }
62698be3 5930 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5931 {
5932 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5933 if (size == 0) {
f71a2ae5 5934 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5935 } else {
f71a2ae5 5936 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5937 }
5938 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5939 break;
aa47cfdd 5940 }
505935fc
WN
5941 case NEON_3R_FLOAT_MISC:
5942 if (u) {
5943 /* VMAXNM/VMINNM */
5944 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5945 if (size == 0) {
f71a2ae5 5946 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5947 } else {
f71a2ae5 5948 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5949 }
5950 tcg_temp_free_ptr(fpstatus);
5951 } else {
5952 if (size == 0) {
5953 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5954 } else {
5955 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5956 }
5957 }
2c0262af 5958 break;
da97f52c
PM
5959 case NEON_3R_VFM:
5960 {
5961 /* VFMA, VFMS: fused multiply-add */
5962 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5963 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5964 if (size) {
5965 /* VFMS */
5966 gen_helper_vfp_negs(tmp, tmp);
5967 }
5968 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5969 tcg_temp_free_i32(tmp3);
5970 tcg_temp_free_ptr(fpstatus);
5971 break;
5972 }
9ee6e8bb
PB
5973 default:
5974 abort();
2c0262af 5975 }
7d1b0095 5976 tcg_temp_free_i32(tmp2);
dd8fbd78 5977
9ee6e8bb
PB
5978 /* Save the result. For elementwise operations we can put it
5979 straight into the destination register. For pairwise operations
5980 we have to be careful to avoid clobbering the source operands. */
5981 if (pairwise && rd == rm) {
dd8fbd78 5982 neon_store_scratch(pass, tmp);
9ee6e8bb 5983 } else {
dd8fbd78 5984 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5985 }
5986
5987 } /* for pass */
5988 if (pairwise && rd == rm) {
5989 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5990 tmp = neon_load_scratch(pass);
5991 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5992 }
5993 }
ad69471c 5994 /* End of 3 register same size operations. */
9ee6e8bb
PB
5995 } else if (insn & (1 << 4)) {
5996 if ((insn & 0x00380080) != 0) {
5997 /* Two registers and shift. */
5998 op = (insn >> 8) & 0xf;
5999 if (insn & (1 << 7)) {
cc13115b
PM
6000 /* 64-bit shift. */
6001 if (op > 7) {
6002 return 1;
6003 }
9ee6e8bb
PB
6004 size = 3;
6005 } else {
6006 size = 2;
6007 while ((insn & (1 << (size + 19))) == 0)
6008 size--;
6009 }
6010 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 6011 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
6012 by immediate using the variable shift operations. */
6013 if (op < 8) {
6014 /* Shift by immediate:
6015 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
6016 if (q && ((rd | rm) & 1)) {
6017 return 1;
6018 }
6019 if (!u && (op == 4 || op == 6)) {
6020 return 1;
6021 }
9ee6e8bb
PB
6022 /* Right shifts are encoded as N - shift, where N is the
6023 element size in bits. */
6024 if (op <= 4)
6025 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
6026 if (size == 3) {
6027 count = q + 1;
6028 } else {
6029 count = q ? 4: 2;
6030 }
6031 switch (size) {
6032 case 0:
6033 imm = (uint8_t) shift;
6034 imm |= imm << 8;
6035 imm |= imm << 16;
6036 break;
6037 case 1:
6038 imm = (uint16_t) shift;
6039 imm |= imm << 16;
6040 break;
6041 case 2:
6042 case 3:
6043 imm = shift;
6044 break;
6045 default:
6046 abort();
6047 }
6048
6049 for (pass = 0; pass < count; pass++) {
ad69471c
PB
6050 if (size == 3) {
6051 neon_load_reg64(cpu_V0, rm + pass);
6052 tcg_gen_movi_i64(cpu_V1, imm);
6053 switch (op) {
6054 case 0: /* VSHR */
6055 case 1: /* VSRA */
6056 if (u)
6057 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6058 else
ad69471c 6059 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6060 break;
ad69471c
PB
6061 case 2: /* VRSHR */
6062 case 3: /* VRSRA */
6063 if (u)
6064 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6065 else
ad69471c 6066 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 6067 break;
ad69471c 6068 case 4: /* VSRI */
ad69471c
PB
6069 case 5: /* VSHL, VSLI */
6070 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
6071 break;
0322b26e 6072 case 6: /* VQSHLU */
02da0b2d
PM
6073 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
6074 cpu_V0, cpu_V1);
ad69471c 6075 break;
0322b26e
PM
6076 case 7: /* VQSHL */
6077 if (u) {
02da0b2d 6078 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
6079 cpu_V0, cpu_V1);
6080 } else {
02da0b2d 6081 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
6082 cpu_V0, cpu_V1);
6083 }
9ee6e8bb 6084 break;
9ee6e8bb 6085 }
ad69471c
PB
6086 if (op == 1 || op == 3) {
6087 /* Accumulate. */
5371cb81 6088 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
6089 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
6090 } else if (op == 4 || (op == 5 && u)) {
6091 /* Insert */
923e6509
CL
6092 neon_load_reg64(cpu_V1, rd + pass);
6093 uint64_t mask;
6094 if (shift < -63 || shift > 63) {
6095 mask = 0;
6096 } else {
6097 if (op == 4) {
6098 mask = 0xffffffffffffffffull >> -shift;
6099 } else {
6100 mask = 0xffffffffffffffffull << shift;
6101 }
6102 }
6103 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
6104 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
6105 }
6106 neon_store_reg64(cpu_V0, rd + pass);
6107 } else { /* size < 3 */
6108 /* Operands in T0 and T1. */
dd8fbd78 6109 tmp = neon_load_reg(rm, pass);
7d1b0095 6110 tmp2 = tcg_temp_new_i32();
dd8fbd78 6111 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
6112 switch (op) {
6113 case 0: /* VSHR */
6114 case 1: /* VSRA */
6115 GEN_NEON_INTEGER_OP(shl);
6116 break;
6117 case 2: /* VRSHR */
6118 case 3: /* VRSRA */
6119 GEN_NEON_INTEGER_OP(rshl);
6120 break;
6121 case 4: /* VSRI */
ad69471c
PB
6122 case 5: /* VSHL, VSLI */
6123 switch (size) {
dd8fbd78
FN
6124 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
6125 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
6126 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 6127 default: abort();
ad69471c
PB
6128 }
6129 break;
0322b26e 6130 case 6: /* VQSHLU */
ad69471c 6131 switch (size) {
0322b26e 6132 case 0:
02da0b2d
PM
6133 gen_helper_neon_qshlu_s8(tmp, cpu_env,
6134 tmp, tmp2);
0322b26e
PM
6135 break;
6136 case 1:
02da0b2d
PM
6137 gen_helper_neon_qshlu_s16(tmp, cpu_env,
6138 tmp, tmp2);
0322b26e
PM
6139 break;
6140 case 2:
02da0b2d
PM
6141 gen_helper_neon_qshlu_s32(tmp, cpu_env,
6142 tmp, tmp2);
0322b26e
PM
6143 break;
6144 default:
cc13115b 6145 abort();
ad69471c
PB
6146 }
6147 break;
0322b26e 6148 case 7: /* VQSHL */
02da0b2d 6149 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 6150 break;
ad69471c 6151 }
7d1b0095 6152 tcg_temp_free_i32(tmp2);
ad69471c
PB
6153
6154 if (op == 1 || op == 3) {
6155 /* Accumulate. */
dd8fbd78 6156 tmp2 = neon_load_reg(rd, pass);
5371cb81 6157 gen_neon_add(size, tmp, tmp2);
7d1b0095 6158 tcg_temp_free_i32(tmp2);
ad69471c
PB
6159 } else if (op == 4 || (op == 5 && u)) {
6160 /* Insert */
6161 switch (size) {
6162 case 0:
6163 if (op == 4)
ca9a32e4 6164 mask = 0xff >> -shift;
ad69471c 6165 else
ca9a32e4
JR
6166 mask = (uint8_t)(0xff << shift);
6167 mask |= mask << 8;
6168 mask |= mask << 16;
ad69471c
PB
6169 break;
6170 case 1:
6171 if (op == 4)
ca9a32e4 6172 mask = 0xffff >> -shift;
ad69471c 6173 else
ca9a32e4
JR
6174 mask = (uint16_t)(0xffff << shift);
6175 mask |= mask << 16;
ad69471c
PB
6176 break;
6177 case 2:
ca9a32e4
JR
6178 if (shift < -31 || shift > 31) {
6179 mask = 0;
6180 } else {
6181 if (op == 4)
6182 mask = 0xffffffffu >> -shift;
6183 else
6184 mask = 0xffffffffu << shift;
6185 }
ad69471c
PB
6186 break;
6187 default:
6188 abort();
6189 }
dd8fbd78 6190 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
6191 tcg_gen_andi_i32(tmp, tmp, mask);
6192 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 6193 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 6194 tcg_temp_free_i32(tmp2);
ad69471c 6195 }
dd8fbd78 6196 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6197 }
6198 } /* for pass */
6199 } else if (op < 10) {
ad69471c 6200 /* Shift by immediate and narrow:
9ee6e8bb 6201 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 6202 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
6203 if (rm & 1) {
6204 return 1;
6205 }
9ee6e8bb
PB
6206 shift = shift - (1 << (size + 3));
6207 size++;
92cdfaeb 6208 if (size == 3) {
a7812ae4 6209 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
6210 neon_load_reg64(cpu_V0, rm);
6211 neon_load_reg64(cpu_V1, rm + 1);
6212 for (pass = 0; pass < 2; pass++) {
6213 TCGv_i64 in;
6214 if (pass == 0) {
6215 in = cpu_V0;
6216 } else {
6217 in = cpu_V1;
6218 }
ad69471c 6219 if (q) {
0b36f4cd 6220 if (input_unsigned) {
92cdfaeb 6221 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 6222 } else {
92cdfaeb 6223 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 6224 }
ad69471c 6225 } else {
0b36f4cd 6226 if (input_unsigned) {
92cdfaeb 6227 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 6228 } else {
92cdfaeb 6229 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 6230 }
ad69471c 6231 }
7d1b0095 6232 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6233 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6234 neon_store_reg(rd, pass, tmp);
6235 } /* for pass */
6236 tcg_temp_free_i64(tmp64);
6237 } else {
6238 if (size == 1) {
6239 imm = (uint16_t)shift;
6240 imm |= imm << 16;
2c0262af 6241 } else {
92cdfaeb
PM
6242 /* size == 2 */
6243 imm = (uint32_t)shift;
6244 }
6245 tmp2 = tcg_const_i32(imm);
6246 tmp4 = neon_load_reg(rm + 1, 0);
6247 tmp5 = neon_load_reg(rm + 1, 1);
6248 for (pass = 0; pass < 2; pass++) {
6249 if (pass == 0) {
6250 tmp = neon_load_reg(rm, 0);
6251 } else {
6252 tmp = tmp4;
6253 }
0b36f4cd
CL
6254 gen_neon_shift_narrow(size, tmp, tmp2, q,
6255 input_unsigned);
92cdfaeb
PM
6256 if (pass == 0) {
6257 tmp3 = neon_load_reg(rm, 1);
6258 } else {
6259 tmp3 = tmp5;
6260 }
0b36f4cd
CL
6261 gen_neon_shift_narrow(size, tmp3, tmp2, q,
6262 input_unsigned);
36aa55dc 6263 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
6264 tcg_temp_free_i32(tmp);
6265 tcg_temp_free_i32(tmp3);
6266 tmp = tcg_temp_new_i32();
92cdfaeb
PM
6267 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
6268 neon_store_reg(rd, pass, tmp);
6269 } /* for pass */
c6067f04 6270 tcg_temp_free_i32(tmp2);
b75263d6 6271 }
9ee6e8bb 6272 } else if (op == 10) {
cc13115b
PM
6273 /* VSHLL, VMOVL */
6274 if (q || (rd & 1)) {
9ee6e8bb 6275 return 1;
cc13115b 6276 }
ad69471c
PB
6277 tmp = neon_load_reg(rm, 0);
6278 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6279 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6280 if (pass == 1)
6281 tmp = tmp2;
6282
6283 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 6284
9ee6e8bb
PB
6285 if (shift != 0) {
6286 /* The shift is less than the width of the source
ad69471c
PB
6287 type, so we can just shift the whole register. */
6288 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
 6289                             /* Widen the result of shift: we need to clear
 6290                              * the potential overflow bits resulting from
 6291                              * left bits of the narrow input appearing as
 6292                              * right bits of the left neighbour narrow
 6293                              * input. */
ad69471c
PB
6294 if (size < 2 || !u) {
6295 uint64_t imm64;
6296 if (size == 0) {
6297 imm = (0xffu >> (8 - shift));
6298 imm |= imm << 16;
acdf01ef 6299 } else if (size == 1) {
ad69471c 6300 imm = 0xffff >> (16 - shift);
acdf01ef
CL
6301 } else {
6302 /* size == 2 */
6303 imm = 0xffffffff >> (32 - shift);
6304 }
6305 if (size < 2) {
6306 imm64 = imm | (((uint64_t)imm) << 32);
6307 } else {
6308 imm64 = imm;
9ee6e8bb 6309 }
acdf01ef 6310 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
6311 }
6312 }
ad69471c 6313 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6314 }
f73534a5 6315 } else if (op >= 14) {
9ee6e8bb 6316 /* VCVT fixed-point. */
cc13115b
PM
6317 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
6318 return 1;
6319 }
f73534a5
PM
6320 /* We have already masked out the must-be-1 top bit of imm6,
6321 * hence this 32-shift where the ARM ARM has 64-imm6.
6322 */
6323 shift = 32 - shift;
9ee6e8bb 6324 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 6325 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 6326 if (!(op & 1)) {
9ee6e8bb 6327 if (u)
5500b06c 6328 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 6329 else
5500b06c 6330 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
6331 } else {
6332 if (u)
5500b06c 6333 gen_vfp_toul(0, shift, 1);
9ee6e8bb 6334 else
5500b06c 6335 gen_vfp_tosl(0, shift, 1);
2c0262af 6336 }
4373f3ce 6337 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
6338 }
6339 } else {
9ee6e8bb
PB
6340 return 1;
6341 }
6342 } else { /* (insn & 0x00380080) == 0 */
6343 int invert;
7d80fee5
PM
6344 if (q && (rd & 1)) {
6345 return 1;
6346 }
9ee6e8bb
PB
6347
6348 op = (insn >> 8) & 0xf;
6349 /* One register and immediate. */
6350 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
6351 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
6352 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
6353 * We choose to not special-case this and will behave as if a
6354 * valid constant encoding of 0 had been given.
6355 */
9ee6e8bb
PB
6356 switch (op) {
6357 case 0: case 1:
6358 /* no-op */
6359 break;
6360 case 2: case 3:
6361 imm <<= 8;
6362 break;
6363 case 4: case 5:
6364 imm <<= 16;
6365 break;
6366 case 6: case 7:
6367 imm <<= 24;
6368 break;
6369 case 8: case 9:
6370 imm |= imm << 16;
6371 break;
6372 case 10: case 11:
6373 imm = (imm << 8) | (imm << 24);
6374 break;
6375 case 12:
8e31209e 6376 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6377 break;
6378 case 13:
6379 imm = (imm << 16) | 0xffff;
6380 break;
6381 case 14:
6382 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6383 if (invert)
6384 imm = ~imm;
6385 break;
6386 case 15:
7d80fee5
PM
6387 if (invert) {
6388 return 1;
6389 }
9ee6e8bb
PB
6390 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6391 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6392 break;
6393 }
6394 if (invert)
6395 imm = ~imm;
6396
9ee6e8bb
PB
6397 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6398 if (op & 1 && op < 12) {
ad69471c 6399 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6400 if (invert) {
6401 /* The immediate value has already been inverted, so
6402 BIC becomes AND. */
ad69471c 6403 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6404 } else {
ad69471c 6405 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6406 }
9ee6e8bb 6407 } else {
ad69471c 6408 /* VMOV, VMVN. */
7d1b0095 6409 tmp = tcg_temp_new_i32();
9ee6e8bb 6410 if (op == 14 && invert) {
a5a14945 6411 int n;
ad69471c
PB
6412 uint32_t val;
6413 val = 0;
9ee6e8bb
PB
6414 for (n = 0; n < 4; n++) {
6415 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6416 val |= 0xff << (n * 8);
9ee6e8bb 6417 }
ad69471c
PB
6418 tcg_gen_movi_i32(tmp, val);
6419 } else {
6420 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6421 }
9ee6e8bb 6422 }
ad69471c 6423 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6424 }
6425 }
e4b3861d 6426 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6427 if (size != 3) {
6428 op = (insn >> 8) & 0xf;
6429 if ((insn & (1 << 6)) == 0) {
6430 /* Three registers of different lengths. */
6431 int src1_wide;
6432 int src2_wide;
6433 int prewiden;
526d0096
PM
6434 /* undefreq: bit 0 : UNDEF if size == 0
6435 * bit 1 : UNDEF if size == 1
6436 * bit 2 : UNDEF if size == 2
6437 * bit 3 : UNDEF if U == 1
6438 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6439 */
6440 int undefreq;
6441 /* prewiden, src1_wide, src2_wide, undefreq */
6442 static const int neon_3reg_wide[16][4] = {
6443 {1, 0, 0, 0}, /* VADDL */
6444 {1, 1, 0, 0}, /* VADDW */
6445 {1, 0, 0, 0}, /* VSUBL */
6446 {1, 1, 0, 0}, /* VSUBW */
6447 {0, 1, 1, 0}, /* VADDHN */
6448 {0, 0, 0, 0}, /* VABAL */
6449 {0, 1, 1, 0}, /* VSUBHN */
6450 {0, 0, 0, 0}, /* VABDL */
6451 {0, 0, 0, 0}, /* VMLAL */
526d0096 6452 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6453 {0, 0, 0, 0}, /* VMLSL */
526d0096 6454 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6455 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6456 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6457 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6458 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6459 };
6460
6461 prewiden = neon_3reg_wide[op][0];
6462 src1_wide = neon_3reg_wide[op][1];
6463 src2_wide = neon_3reg_wide[op][2];
695272dc 6464 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6465
526d0096
PM
6466 if ((undefreq & (1 << size)) ||
6467 ((undefreq & 8) && u)) {
695272dc
PM
6468 return 1;
6469 }
6470 if ((src1_wide && (rn & 1)) ||
6471 (src2_wide && (rm & 1)) ||
6472 (!src2_wide && (rd & 1))) {
ad69471c 6473 return 1;
695272dc 6474 }
ad69471c 6475
4e624eda
PM
6476 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6477 * outside the loop below as it only performs a single pass.
6478 */
6479 if (op == 14 && size == 2) {
6480 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6481
d614a513 6482 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6483 return 1;
6484 }
6485 tcg_rn = tcg_temp_new_i64();
6486 tcg_rm = tcg_temp_new_i64();
6487 tcg_rd = tcg_temp_new_i64();
6488 neon_load_reg64(tcg_rn, rn);
6489 neon_load_reg64(tcg_rm, rm);
6490 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6491 neon_store_reg64(tcg_rd, rd);
6492 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6493 neon_store_reg64(tcg_rd, rd + 1);
6494 tcg_temp_free_i64(tcg_rn);
6495 tcg_temp_free_i64(tcg_rm);
6496 tcg_temp_free_i64(tcg_rd);
6497 return 0;
6498 }
6499
9ee6e8bb
PB
6500 /* Avoid overlapping operands. Wide source operands are
6501 always aligned so will never overlap with wide
6502 destinations in problematic ways. */
8f8e3aa4 6503 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6504 tmp = neon_load_reg(rm, 1);
6505 neon_store_scratch(2, tmp);
8f8e3aa4 6506 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6507 tmp = neon_load_reg(rn, 1);
6508 neon_store_scratch(2, tmp);
9ee6e8bb 6509 }
39d5492a 6510 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6511 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6512 if (src1_wide) {
6513 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6514 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6515 } else {
ad69471c 6516 if (pass == 1 && rd == rn) {
dd8fbd78 6517 tmp = neon_load_scratch(2);
9ee6e8bb 6518 } else {
ad69471c
PB
6519 tmp = neon_load_reg(rn, pass);
6520 }
6521 if (prewiden) {
6522 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6523 }
6524 }
ad69471c
PB
6525 if (src2_wide) {
6526 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6527 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6528 } else {
ad69471c 6529 if (pass == 1 && rd == rm) {
dd8fbd78 6530 tmp2 = neon_load_scratch(2);
9ee6e8bb 6531 } else {
ad69471c
PB
6532 tmp2 = neon_load_reg(rm, pass);
6533 }
6534 if (prewiden) {
6535 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6536 }
9ee6e8bb
PB
6537 }
6538 switch (op) {
6539 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6540 gen_neon_addl(size);
9ee6e8bb 6541 break;
79b0e534 6542 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6543 gen_neon_subl(size);
9ee6e8bb
PB
6544 break;
6545 case 5: case 7: /* VABAL, VABDL */
6546 switch ((size << 1) | u) {
ad69471c
PB
6547 case 0:
6548 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6549 break;
6550 case 1:
6551 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6552 break;
6553 case 2:
6554 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6555 break;
6556 case 3:
6557 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6558 break;
6559 case 4:
6560 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6561 break;
6562 case 5:
6563 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6564 break;
9ee6e8bb
PB
6565 default: abort();
6566 }
7d1b0095
PM
6567 tcg_temp_free_i32(tmp2);
6568 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6569 break;
6570 case 8: case 9: case 10: case 11: case 12: case 13:
6571 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6572 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6573 break;
6574 case 14: /* Polynomial VMULL */
e5ca24cb 6575 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6576 tcg_temp_free_i32(tmp2);
6577 tcg_temp_free_i32(tmp);
e5ca24cb 6578 break;
695272dc
PM
6579 default: /* 15 is RESERVED: caught earlier */
6580 abort();
9ee6e8bb 6581 }
ebcd88ce
PM
6582 if (op == 13) {
6583 /* VQDMULL */
6584 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6585 neon_store_reg64(cpu_V0, rd + pass);
6586 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6587 /* Accumulate. */
ebcd88ce 6588 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6589 switch (op) {
4dc064e6
PM
6590 case 10: /* VMLSL */
6591 gen_neon_negl(cpu_V0, size);
6592 /* Fall through */
6593 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6594 gen_neon_addl(size);
9ee6e8bb
PB
6595 break;
6596 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6597 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6598 if (op == 11) {
6599 gen_neon_negl(cpu_V0, size);
6600 }
ad69471c
PB
6601 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6602 break;
9ee6e8bb
PB
6603 default:
6604 abort();
6605 }
ad69471c 6606 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6607 } else if (op == 4 || op == 6) {
6608 /* Narrowing operation. */
7d1b0095 6609 tmp = tcg_temp_new_i32();
79b0e534 6610 if (!u) {
9ee6e8bb 6611 switch (size) {
ad69471c
PB
6612 case 0:
6613 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6614 break;
6615 case 1:
6616 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6617 break;
6618 case 2:
6619 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6620 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6621 break;
9ee6e8bb
PB
6622 default: abort();
6623 }
6624 } else {
6625 switch (size) {
ad69471c
PB
6626 case 0:
6627 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6628 break;
6629 case 1:
6630 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6631 break;
6632 case 2:
6633 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6634 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
ecc7b3aa 6635 tcg_gen_extrl_i64_i32(tmp, cpu_V0);
ad69471c 6636 break;
9ee6e8bb
PB
6637 default: abort();
6638 }
6639 }
ad69471c
PB
6640 if (pass == 0) {
6641 tmp3 = tmp;
6642 } else {
6643 neon_store_reg(rd, 0, tmp3);
6644 neon_store_reg(rd, 1, tmp);
6645 }
9ee6e8bb
PB
6646 } else {
6647 /* Write back the result. */
ad69471c 6648 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6649 }
6650 }
6651 } else {
3e3326df
PM
6652 /* Two registers and a scalar. NB that for ops of this form
6653 * the ARM ARM labels bit 24 as Q, but it is in our variable
6654 * 'u', not 'q'.
6655 */
6656 if (size == 0) {
6657 return 1;
6658 }
9ee6e8bb 6659 switch (op) {
9ee6e8bb 6660 case 1: /* Float VMLA scalar */
9ee6e8bb 6661 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6662 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6663 if (size == 1) {
6664 return 1;
6665 }
6666 /* fall through */
6667 case 0: /* Integer VMLA scalar */
6668 case 4: /* Integer VMLS scalar */
6669 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6670 case 12: /* VQDMULH scalar */
6671 case 13: /* VQRDMULH scalar */
3e3326df
PM
6672 if (u && ((rd | rn) & 1)) {
6673 return 1;
6674 }
dd8fbd78
FN
6675 tmp = neon_get_scalar(size, rm);
6676 neon_store_scratch(0, tmp);
9ee6e8bb 6677 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6678 tmp = neon_load_scratch(0);
6679 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6680 if (op == 12) {
6681 if (size == 1) {
02da0b2d 6682 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6683 } else {
02da0b2d 6684 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6685 }
6686 } else if (op == 13) {
6687 if (size == 1) {
02da0b2d 6688 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6689 } else {
02da0b2d 6690 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6691 }
6692 } else if (op & 1) {
aa47cfdd
PM
6693 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6694 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6695 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6696 } else {
6697 switch (size) {
dd8fbd78
FN
6698 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6699 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6700 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6701 default: abort();
9ee6e8bb
PB
6702 }
6703 }
7d1b0095 6704 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6705 if (op < 8) {
6706 /* Accumulate. */
dd8fbd78 6707 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6708 switch (op) {
6709 case 0:
dd8fbd78 6710 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6711 break;
6712 case 1:
aa47cfdd
PM
6713 {
6714 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6715 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6716 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6717 break;
aa47cfdd 6718 }
9ee6e8bb 6719 case 4:
dd8fbd78 6720 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6721 break;
6722 case 5:
aa47cfdd
PM
6723 {
6724 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6725 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6726 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6727 break;
aa47cfdd 6728 }
9ee6e8bb
PB
6729 default:
6730 abort();
6731 }
7d1b0095 6732 tcg_temp_free_i32(tmp2);
9ee6e8bb 6733 }
dd8fbd78 6734 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6735 }
6736 break;
9ee6e8bb 6737 case 3: /* VQDMLAL scalar */
9ee6e8bb 6738 case 7: /* VQDMLSL scalar */
9ee6e8bb 6739 case 11: /* VQDMULL scalar */
3e3326df 6740 if (u == 1) {
ad69471c 6741 return 1;
3e3326df
PM
6742 }
6743 /* fall through */
 6744 case 2: /* VMLAL scalar */
6745 case 6: /* VMLSL scalar */
6746 case 10: /* VMULL scalar */
6747 if (rd & 1) {
6748 return 1;
6749 }
dd8fbd78 6750 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6751 /* We need a copy of tmp2 because gen_neon_mull
6752 * deletes it during pass 0. */
7d1b0095 6753 tmp4 = tcg_temp_new_i32();
c6067f04 6754 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6755 tmp3 = neon_load_reg(rn, 1);
ad69471c 6756
9ee6e8bb 6757 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6758 if (pass == 0) {
6759 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6760 } else {
dd8fbd78 6761 tmp = tmp3;
c6067f04 6762 tmp2 = tmp4;
9ee6e8bb 6763 }
ad69471c 6764 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6765 if (op != 11) {
6766 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6767 }
9ee6e8bb 6768 switch (op) {
4dc064e6
PM
6769 case 6:
6770 gen_neon_negl(cpu_V0, size);
6771 /* Fall through */
6772 case 2:
ad69471c 6773 gen_neon_addl(size);
9ee6e8bb
PB
6774 break;
6775 case 3: case 7:
ad69471c 6776 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6777 if (op == 7) {
6778 gen_neon_negl(cpu_V0, size);
6779 }
ad69471c 6780 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6781 break;
6782 case 10:
6783 /* no-op */
6784 break;
6785 case 11:
ad69471c 6786 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6787 break;
6788 default:
6789 abort();
6790 }
ad69471c 6791 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6792 }
dd8fbd78 6793
dd8fbd78 6794
9ee6e8bb
PB
6795 break;
6796 default: /* 14 and 15 are RESERVED */
6797 return 1;
6798 }
6799 }
6800 } else { /* size == 3 */
6801 if (!u) {
6802 /* Extract. */
9ee6e8bb 6803 imm = (insn >> 8) & 0xf;
ad69471c
PB
6804
6805 if (imm > 7 && !q)
6806 return 1;
6807
52579ea1
PM
6808 if (q && ((rd | rn | rm) & 1)) {
6809 return 1;
6810 }
6811
ad69471c
PB
6812 if (imm == 0) {
6813 neon_load_reg64(cpu_V0, rn);
6814 if (q) {
6815 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6816 }
ad69471c
PB
6817 } else if (imm == 8) {
6818 neon_load_reg64(cpu_V0, rn + 1);
6819 if (q) {
6820 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6821 }
ad69471c 6822 } else if (q) {
a7812ae4 6823 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6824 if (imm < 8) {
6825 neon_load_reg64(cpu_V0, rn);
a7812ae4 6826 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6827 } else {
6828 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6829 neon_load_reg64(tmp64, rm);
ad69471c
PB
6830 }
6831 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6832 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6833 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6834 if (imm < 8) {
6835 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6836 } else {
ad69471c
PB
6837 neon_load_reg64(cpu_V1, rm + 1);
6838 imm -= 8;
9ee6e8bb 6839 }
ad69471c 6840 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6841 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6842 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6843 tcg_temp_free_i64(tmp64);
ad69471c 6844 } else {
a7812ae4 6845 /* BUGFIX */
ad69471c 6846 neon_load_reg64(cpu_V0, rn);
a7812ae4 6847 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6848 neon_load_reg64(cpu_V1, rm);
a7812ae4 6849 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6850 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6851 }
6852 neon_store_reg64(cpu_V0, rd);
6853 if (q) {
6854 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6855 }
6856 } else if ((insn & (1 << 11)) == 0) {
6857 /* Two register misc. */
6858 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6859 size = (insn >> 18) & 3;
600b828c
PM
6860 /* UNDEF for unknown op values and bad op-size combinations */
6861 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6862 return 1;
6863 }
fe8fcf3d
PM
6864 if (neon_2rm_is_v8_op(op) &&
6865 !arm_dc_feature(s, ARM_FEATURE_V8)) {
6866 return 1;
6867 }
fc2a9b37
PM
6868 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6869 q && ((rm | rd) & 1)) {
6870 return 1;
6871 }
9ee6e8bb 6872 switch (op) {
600b828c 6873 case NEON_2RM_VREV64:
9ee6e8bb 6874 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6875 tmp = neon_load_reg(rm, pass * 2);
6876 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6877 switch (size) {
dd8fbd78
FN
6878 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6879 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6880 case 2: /* no-op */ break;
6881 default: abort();
6882 }
dd8fbd78 6883 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6884 if (size == 2) {
dd8fbd78 6885 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6886 } else {
9ee6e8bb 6887 switch (size) {
dd8fbd78
FN
6888 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6889 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6890 default: abort();
6891 }
dd8fbd78 6892 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6893 }
6894 }
6895 break;
600b828c
PM
6896 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6897 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6898 for (pass = 0; pass < q + 1; pass++) {
6899 tmp = neon_load_reg(rm, pass * 2);
6900 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6901 tmp = neon_load_reg(rm, pass * 2 + 1);
6902 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6903 switch (size) {
6904 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6905 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6906 case 2: tcg_gen_add_i64(CPU_V001); break;
6907 default: abort();
6908 }
600b828c 6909 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6910 /* Accumulate. */
ad69471c
PB
6911 neon_load_reg64(cpu_V1, rd + pass);
6912 gen_neon_addl(size);
9ee6e8bb 6913 }
ad69471c 6914 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6915 }
6916 break;
600b828c 6917 case NEON_2RM_VTRN:
9ee6e8bb 6918 if (size == 2) {
a5a14945 6919 int n;
9ee6e8bb 6920 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6921 tmp = neon_load_reg(rm, n);
6922 tmp2 = neon_load_reg(rd, n + 1);
6923 neon_store_reg(rm, n, tmp2);
6924 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6925 }
6926 } else {
6927 goto elementwise;
6928 }
6929 break;
600b828c 6930 case NEON_2RM_VUZP:
02acedf9 6931 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6932 return 1;
9ee6e8bb
PB
6933 }
6934 break;
600b828c 6935 case NEON_2RM_VZIP:
d68a6f3a 6936 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6937 return 1;
9ee6e8bb
PB
6938 }
6939 break;
600b828c
PM
6940 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6941 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6942 if (rm & 1) {
6943 return 1;
6944 }
39d5492a 6945 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6946 for (pass = 0; pass < 2; pass++) {
ad69471c 6947 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6948 tmp = tcg_temp_new_i32();
600b828c
PM
6949 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6950 tmp, cpu_V0);
ad69471c
PB
6951 if (pass == 0) {
6952 tmp2 = tmp;
6953 } else {
6954 neon_store_reg(rd, 0, tmp2);
6955 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6956 }
9ee6e8bb
PB
6957 }
6958 break;
600b828c 6959 case NEON_2RM_VSHLL:
fc2a9b37 6960 if (q || (rd & 1)) {
9ee6e8bb 6961 return 1;
600b828c 6962 }
ad69471c
PB
6963 tmp = neon_load_reg(rm, 0);
6964 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6965 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6966 if (pass == 1)
6967 tmp = tmp2;
6968 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6969 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6970 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6971 }
6972 break;
600b828c 6973 case NEON_2RM_VCVT_F16_F32:
d614a513 6974 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6975 q || (rm & 1)) {
6976 return 1;
6977 }
7d1b0095
PM
6978 tmp = tcg_temp_new_i32();
6979 tmp2 = tcg_temp_new_i32();
60011498 6980 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6981 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6982 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6983 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6984 tcg_gen_shli_i32(tmp2, tmp2, 16);
6985 tcg_gen_or_i32(tmp2, tmp2, tmp);
6986 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6987 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6988 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6989 neon_store_reg(rd, 0, tmp2);
7d1b0095 6990 tmp2 = tcg_temp_new_i32();
2d981da7 6991 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6992 tcg_gen_shli_i32(tmp2, tmp2, 16);
6993 tcg_gen_or_i32(tmp2, tmp2, tmp);
6994 neon_store_reg(rd, 1, tmp2);
7d1b0095 6995 tcg_temp_free_i32(tmp);
60011498 6996 break;
600b828c 6997 case NEON_2RM_VCVT_F32_F16:
d614a513 6998 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6999 q || (rd & 1)) {
7000 return 1;
7001 }
7d1b0095 7002 tmp3 = tcg_temp_new_i32();
60011498
PB
7003 tmp = neon_load_reg(rm, 0);
7004 tmp2 = neon_load_reg(rm, 1);
7005 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 7006 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7007 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
7008 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 7009 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7010 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 7011 tcg_temp_free_i32(tmp);
60011498 7012 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 7013 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
7014 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
7015 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 7016 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 7017 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
7018 tcg_temp_free_i32(tmp2);
7019 tcg_temp_free_i32(tmp3);
60011498 7020 break;
9d935509 7021 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 7022 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
7023 || ((rm | rd) & 1)) {
7024 return 1;
7025 }
7026 tmp = tcg_const_i32(rd);
7027 tmp2 = tcg_const_i32(rm);
7028
7029 /* Bit 6 is the lowest opcode bit; it distinguishes between
7030 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
7031 */
7032 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
7033
7034 if (op == NEON_2RM_AESE) {
7035 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
7036 } else {
7037 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
7038 }
7039 tcg_temp_free_i32(tmp);
7040 tcg_temp_free_i32(tmp2);
7041 tcg_temp_free_i32(tmp3);
7042 break;
f1ecb913 7043 case NEON_2RM_SHA1H:
d614a513 7044 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
7045 || ((rm | rd) & 1)) {
7046 return 1;
7047 }
7048 tmp = tcg_const_i32(rd);
7049 tmp2 = tcg_const_i32(rm);
7050
7051 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
7052
7053 tcg_temp_free_i32(tmp);
7054 tcg_temp_free_i32(tmp2);
7055 break;
7056 case NEON_2RM_SHA1SU1:
7057 if ((rm | rd) & 1) {
7058 return 1;
7059 }
7060 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
7061 if (q) {
d614a513 7062 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
7063 return 1;
7064 }
d614a513 7065 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
7066 return 1;
7067 }
7068 tmp = tcg_const_i32(rd);
7069 tmp2 = tcg_const_i32(rm);
7070 if (q) {
7071 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
7072 } else {
7073 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
7074 }
7075 tcg_temp_free_i32(tmp);
7076 tcg_temp_free_i32(tmp2);
7077 break;
9ee6e8bb
PB
7078 default:
7079 elementwise:
7080 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 7081 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7082 tcg_gen_ld_f32(cpu_F0s, cpu_env,
7083 neon_reg_offset(rm, pass));
39d5492a 7084 TCGV_UNUSED_I32(tmp);
9ee6e8bb 7085 } else {
dd8fbd78 7086 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
7087 }
7088 switch (op) {
600b828c 7089 case NEON_2RM_VREV32:
9ee6e8bb 7090 switch (size) {
dd8fbd78
FN
7091 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
7092 case 1: gen_swap_half(tmp); break;
600b828c 7093 default: abort();
9ee6e8bb
PB
7094 }
7095 break;
600b828c 7096 case NEON_2RM_VREV16:
dd8fbd78 7097 gen_rev16(tmp);
9ee6e8bb 7098 break;
600b828c 7099 case NEON_2RM_VCLS:
9ee6e8bb 7100 switch (size) {
dd8fbd78
FN
7101 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
7102 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
7103 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 7104 default: abort();
9ee6e8bb
PB
7105 }
7106 break;
600b828c 7107 case NEON_2RM_VCLZ:
9ee6e8bb 7108 switch (size) {
dd8fbd78
FN
7109 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
7110 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
7539a012 7111 case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
600b828c 7112 default: abort();
9ee6e8bb
PB
7113 }
7114 break;
600b828c 7115 case NEON_2RM_VCNT:
dd8fbd78 7116 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 7117 break;
600b828c 7118 case NEON_2RM_VMVN:
dd8fbd78 7119 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 7120 break;
600b828c 7121 case NEON_2RM_VQABS:
9ee6e8bb 7122 switch (size) {
02da0b2d
PM
7123 case 0:
7124 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
7125 break;
7126 case 1:
7127 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
7128 break;
7129 case 2:
7130 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
7131 break;
600b828c 7132 default: abort();
9ee6e8bb
PB
7133 }
7134 break;
600b828c 7135 case NEON_2RM_VQNEG:
9ee6e8bb 7136 switch (size) {
02da0b2d
PM
7137 case 0:
7138 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
7139 break;
7140 case 1:
7141 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
7142 break;
7143 case 2:
7144 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
7145 break;
600b828c 7146 default: abort();
9ee6e8bb
PB
7147 }
7148 break;
600b828c 7149 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 7150 tmp2 = tcg_const_i32(0);
9ee6e8bb 7151 switch(size) {
dd8fbd78
FN
7152 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
7153 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
7154 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 7155 default: abort();
9ee6e8bb 7156 }
39d5492a 7157 tcg_temp_free_i32(tmp2);
600b828c 7158 if (op == NEON_2RM_VCLE0) {
dd8fbd78 7159 tcg_gen_not_i32(tmp, tmp);
600b828c 7160 }
9ee6e8bb 7161 break;
600b828c 7162 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 7163 tmp2 = tcg_const_i32(0);
9ee6e8bb 7164 switch(size) {
dd8fbd78
FN
7165 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
7166 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
7167 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 7168 default: abort();
9ee6e8bb 7169 }
39d5492a 7170 tcg_temp_free_i32(tmp2);
600b828c 7171 if (op == NEON_2RM_VCLT0) {
dd8fbd78 7172 tcg_gen_not_i32(tmp, tmp);
600b828c 7173 }
9ee6e8bb 7174 break;
600b828c 7175 case NEON_2RM_VCEQ0:
dd8fbd78 7176 tmp2 = tcg_const_i32(0);
9ee6e8bb 7177 switch(size) {
dd8fbd78
FN
7178 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
7179 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
7180 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 7181 default: abort();
9ee6e8bb 7182 }
39d5492a 7183 tcg_temp_free_i32(tmp2);
9ee6e8bb 7184 break;
600b828c 7185 case NEON_2RM_VABS:
9ee6e8bb 7186 switch(size) {
dd8fbd78
FN
7187 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
7188 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
7189 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 7190 default: abort();
9ee6e8bb
PB
7191 }
7192 break;
600b828c 7193 case NEON_2RM_VNEG:
dd8fbd78
FN
7194 tmp2 = tcg_const_i32(0);
7195 gen_neon_rsb(size, tmp, tmp2);
39d5492a 7196 tcg_temp_free_i32(tmp2);
9ee6e8bb 7197 break;
600b828c 7198 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
7199 {
7200 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7201 tmp2 = tcg_const_i32(0);
aa47cfdd 7202 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7203 tcg_temp_free_i32(tmp2);
aa47cfdd 7204 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7205 break;
aa47cfdd 7206 }
600b828c 7207 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
7208 {
7209 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7210 tmp2 = tcg_const_i32(0);
aa47cfdd 7211 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7212 tcg_temp_free_i32(tmp2);
aa47cfdd 7213 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7214 break;
aa47cfdd 7215 }
600b828c 7216 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
7217 {
7218 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 7219 tmp2 = tcg_const_i32(0);
aa47cfdd 7220 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 7221 tcg_temp_free_i32(tmp2);
aa47cfdd 7222 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7223 break;
aa47cfdd 7224 }
600b828c 7225 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
7226 {
7227 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7228 tmp2 = tcg_const_i32(0);
aa47cfdd 7229 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7230 tcg_temp_free_i32(tmp2);
aa47cfdd 7231 tcg_temp_free_ptr(fpstatus);
0e326109 7232 break;
aa47cfdd 7233 }
600b828c 7234 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
7235 {
7236 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 7237 tmp2 = tcg_const_i32(0);
aa47cfdd 7238 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 7239 tcg_temp_free_i32(tmp2);
aa47cfdd 7240 tcg_temp_free_ptr(fpstatus);
0e326109 7241 break;
aa47cfdd 7242 }
600b828c 7243 case NEON_2RM_VABS_F:
4373f3ce 7244 gen_vfp_abs(0);
9ee6e8bb 7245 break;
600b828c 7246 case NEON_2RM_VNEG_F:
4373f3ce 7247 gen_vfp_neg(0);
9ee6e8bb 7248 break;
600b828c 7249 case NEON_2RM_VSWP:
dd8fbd78
FN
7250 tmp2 = neon_load_reg(rd, pass);
7251 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7252 break;
600b828c 7253 case NEON_2RM_VTRN:
dd8fbd78 7254 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 7255 switch (size) {
dd8fbd78
FN
7256 case 0: gen_neon_trn_u8(tmp, tmp2); break;
7257 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 7258 default: abort();
9ee6e8bb 7259 }
dd8fbd78 7260 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 7261 break;
34f7b0a2
WN
7262 case NEON_2RM_VRINTN:
7263 case NEON_2RM_VRINTA:
7264 case NEON_2RM_VRINTM:
7265 case NEON_2RM_VRINTP:
7266 case NEON_2RM_VRINTZ:
7267 {
7268 TCGv_i32 tcg_rmode;
7269 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7270 int rmode;
7271
7272 if (op == NEON_2RM_VRINTZ) {
7273 rmode = FPROUNDING_ZERO;
7274 } else {
7275 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
7276 }
7277
7278 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7279 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7280 cpu_env);
7281 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
7282 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7283 cpu_env);
7284 tcg_temp_free_ptr(fpstatus);
7285 tcg_temp_free_i32(tcg_rmode);
7286 break;
7287 }
2ce70625
WN
7288 case NEON_2RM_VRINTX:
7289 {
7290 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7291 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
7292 tcg_temp_free_ptr(fpstatus);
7293 break;
7294 }
901ad525
WN
7295 case NEON_2RM_VCVTAU:
7296 case NEON_2RM_VCVTAS:
7297 case NEON_2RM_VCVTNU:
7298 case NEON_2RM_VCVTNS:
7299 case NEON_2RM_VCVTPU:
7300 case NEON_2RM_VCVTPS:
7301 case NEON_2RM_VCVTMU:
7302 case NEON_2RM_VCVTMS:
7303 {
7304 bool is_signed = !extract32(insn, 7, 1);
7305 TCGv_ptr fpst = get_fpstatus_ptr(1);
7306 TCGv_i32 tcg_rmode, tcg_shift;
7307 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
7308
7309 tcg_shift = tcg_const_i32(0);
7310 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
7311 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7312 cpu_env);
7313
7314 if (is_signed) {
7315 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
7316 tcg_shift, fpst);
7317 } else {
7318 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
7319 tcg_shift, fpst);
7320 }
7321
7322 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
7323 cpu_env);
7324 tcg_temp_free_i32(tcg_rmode);
7325 tcg_temp_free_i32(tcg_shift);
7326 tcg_temp_free_ptr(fpst);
7327 break;
7328 }
600b828c 7329 case NEON_2RM_VRECPE:
b6d4443a
AB
7330 {
7331 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7332 gen_helper_recpe_u32(tmp, tmp, fpstatus);
7333 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7334 break;
b6d4443a 7335 }
600b828c 7336 case NEON_2RM_VRSQRTE:
c2fb418e
AB
7337 {
7338 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7339 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
7340 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7341 break;
c2fb418e 7342 }
600b828c 7343 case NEON_2RM_VRECPE_F:
b6d4443a
AB
7344 {
7345 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7346 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
7347 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7348 break;
b6d4443a 7349 }
600b828c 7350 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
7351 {
7352 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
7353 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
7354 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 7355 break;
c2fb418e 7356 }
600b828c 7357 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 7358 gen_vfp_sito(0, 1);
9ee6e8bb 7359 break;
600b828c 7360 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 7361 gen_vfp_uito(0, 1);
9ee6e8bb 7362 break;
600b828c 7363 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 7364 gen_vfp_tosiz(0, 1);
9ee6e8bb 7365 break;
600b828c 7366 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 7367 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
7368 break;
7369 default:
600b828c
PM
7370 /* Reserved op values were caught by the
7371 * neon_2rm_sizes[] check earlier.
7372 */
7373 abort();
9ee6e8bb 7374 }
600b828c 7375 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
7376 tcg_gen_st_f32(cpu_F0s, cpu_env,
7377 neon_reg_offset(rd, pass));
9ee6e8bb 7378 } else {
dd8fbd78 7379 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7380 }
7381 }
7382 break;
7383 }
7384 } else if ((insn & (1 << 10)) == 0) {
7385 /* VTBL, VTBX. */
56907d77
PM
7386 int n = ((insn >> 8) & 3) + 1;
7387 if ((rn + n) > 32) {
7388 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7389 * helper function running off the end of the register file.
7390 */
7391 return 1;
7392 }
7393 n <<= 3;
9ee6e8bb 7394 if (insn & (1 << 6)) {
8f8e3aa4 7395 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7396 } else {
7d1b0095 7397 tmp = tcg_temp_new_i32();
8f8e3aa4 7398 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7399 }
8f8e3aa4 7400 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7401 tmp4 = tcg_const_i32(rn);
7402 tmp5 = tcg_const_i32(n);
9ef39277 7403 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7404 tcg_temp_free_i32(tmp);
9ee6e8bb 7405 if (insn & (1 << 6)) {
8f8e3aa4 7406 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7407 } else {
7d1b0095 7408 tmp = tcg_temp_new_i32();
8f8e3aa4 7409 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7410 }
8f8e3aa4 7411 tmp3 = neon_load_reg(rm, 1);
9ef39277 7412 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7413 tcg_temp_free_i32(tmp5);
7414 tcg_temp_free_i32(tmp4);
8f8e3aa4 7415 neon_store_reg(rd, 0, tmp2);
3018f259 7416 neon_store_reg(rd, 1, tmp3);
7d1b0095 7417 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7418 } else if ((insn & 0x380) == 0) {
7419 /* VDUP */
133da6aa
JR
7420 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7421 return 1;
7422 }
9ee6e8bb 7423 if (insn & (1 << 19)) {
dd8fbd78 7424 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7425 } else {
dd8fbd78 7426 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7427 }
7428 if (insn & (1 << 16)) {
dd8fbd78 7429 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7430 } else if (insn & (1 << 17)) {
7431 if ((insn >> 18) & 1)
dd8fbd78 7432 gen_neon_dup_high16(tmp);
9ee6e8bb 7433 else
dd8fbd78 7434 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7435 }
7436 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7437 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7438 tcg_gen_mov_i32(tmp2, tmp);
7439 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7440 }
7d1b0095 7441 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7442 } else {
7443 return 1;
7444 }
7445 }
7446 }
7447 return 0;
7448}
7449
/* Decode a coprocessor register access instruction (MRC/MCR/MRRC/MCRR and
 * the XScale/iwMMXt coprocessor spaces).
 *
 * Returns 0 if the access was handled (code has been emitted), nonzero
 * if the instruction is not a valid/permitted access — the caller is
 * expected to treat a nonzero return as UNDEF.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* c15_cpar gates per-coprocessor access on XScale */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-identifying fields; the encoding differs for
     * 64-bit (MRRC/MCRR) vs 32-bit (MRC/MCR) accesses.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn, tcg_isread;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, false);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, false);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* Sync condexec/PC state so the helper can raise an exception
             * with the correct return address (s->pc - 4 == this insn).
             */
            gen_set_condexec(s);
            gen_set_pc_im(s, s->pc - 4);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            tcg_isread = tcg_const_i32(isread);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
                                           tcg_isread);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
            tcg_temp_free_i32(tcg_isread);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers must be bracketed by gen_io_start/gen_io_end when
         * icount is in use.
         */
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value: low word -> rt, high word -> rt2 */
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7701
5e3f878a
PB
7702
7703/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7704static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7705{
39d5492a 7706 TCGv_i32 tmp;
7d1b0095 7707 tmp = tcg_temp_new_i32();
ecc7b3aa 7708 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a 7709 store_reg(s, rlow, tmp);
7d1b0095 7710 tmp = tcg_temp_new_i32();
5e3f878a 7711 tcg_gen_shri_i64(val, val, 32);
ecc7b3aa 7712 tcg_gen_extrl_i64_i32(tmp, val);
5e3f878a
PB
7713 store_reg(s, rhigh, tmp);
7714}
7715
7716/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7717static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7718{
a7812ae4 7719 TCGv_i64 tmp;
39d5492a 7720 TCGv_i32 tmp2;
5e3f878a 7721
36aa55dc 7722 /* Load value and extend to 64 bits. */
a7812ae4 7723 tmp = tcg_temp_new_i64();
5e3f878a
PB
7724 tmp2 = load_reg(s, rlow);
7725 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7726 tcg_temp_free_i32(tmp2);
5e3f878a 7727 tcg_gen_add_i64(val, val, tmp);
b75263d6 7728 tcg_temp_free_i64(tmp);
5e3f878a
PB
7729}
7730
7731/* load and add a 64-bit value from a register pair. */
a7812ae4 7732static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7733{
a7812ae4 7734 TCGv_i64 tmp;
39d5492a
PM
7735 TCGv_i32 tmpl;
7736 TCGv_i32 tmph;
5e3f878a
PB
7737
7738 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7739 tmpl = load_reg(s, rlow);
7740 tmph = load_reg(s, rhigh);
a7812ae4 7741 tmp = tcg_temp_new_i64();
36aa55dc 7742 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7743 tcg_temp_free_i32(tmpl);
7744 tcg_temp_free_i32(tmph);
5e3f878a 7745 tcg_gen_add_i64(val, val, tmp);
b75263d6 7746 tcg_temp_free_i64(tmp);
5e3f878a
PB
7747}
7748
c9f10124 7749/* Set N and Z flags from hi|lo. */
39d5492a 7750static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 7751{
c9f10124
RH
7752 tcg_gen_mov_i32(cpu_NF, hi);
7753 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
7754}
7755
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores. The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */

/* Emit code for LDREX/LDREXB/LDREXH/LDREXD: load into rt (and rt2 for
 * the 64-bit case) and record the address and loaded value in
 * cpu_exclusive_addr/cpu_exclusive_val for a later store-exclusive.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    /* size encodes the MO_8/16/32/64 width; alignment is required. */
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        /* 64-bit LDREXD: one aligned 64-bit load, split into rt/rt2. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
        /* Remember the full 64-bit value for the cmpxchg in the store. */
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        /* Narrow loads are zero-extended into the 64-bit monitor value. */
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Record the monitored address (zero-extended to 64 bits). */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7788
/* Emit code for CLREX: clear the local exclusive monitor.
 * Setting cpu_exclusive_addr to -1 guarantees the address comparison in
 * gen_store_exclusive() fails, because monitored addresses are always
 * zero-extended 32-bit values and so can never equal all-ones.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7793
/* Emit code for STREX/STREXB/STREXH/STREXD: store rt (and rt2 for the
 * 64-bit case) to addr if the exclusive monitor still matches, writing
 * 0 to rd on success and 1 on failure. The value comparison is folded
 * into an atomic cmpxchg; the address comparison is done explicitly.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    TCGMemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Address check: zero-extend addr and compare against the monitor;
     * a mismatch (including a cleared monitor, -1) jumps straight to fail.
     */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();     /* will hold the success/fail result */
    t1 = load_reg(s, rt);        /* new value (low word for STREXD) */
    if (size == 3) {
        /* 64-bit STREXD: build the new value from rt:rt2, byte-swap as
         * needed for the guest endianness, and cmpxchg against the
         * remembered 64-bit value.
         */
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        tcg_gen_concat_i32_i64(n64, t1, t2);
        tcg_temp_free_i32(t2);
        gen_aa32_frob64(s, n64);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        /* o64 holds the old memory value; nonzero result means the
         * cmpxchg did not match, i.e. the store-exclusive failed.
         */
        gen_aa32_frob64(s, o64);
        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        /* 8/16/32-bit: cmpxchg the remembered value against memory. */
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    /* rd = 0 on success, 1 on value mismatch (setcond result). */
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way, the exclusive monitor is cleared. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
426f5abc 7856
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn: stores LR and
 * SPSR of the current mode to the stack of the mode given by @mode.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        /* Trap to EL3: exception targets EL3 (last argument). */
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    /* Validate the requested mode number against what's implemented. */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc - 4);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from the banked SP to the first word stored. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at the next word up. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Writeback: adjust from the SPSR slot back to the final SP
         * value for the chosen addressing mode.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
    s->is_jmp = DISAS_UPDATE;
}
7983
f4df2210 7984static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7985{
f4df2210 7986 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7987 TCGv_i32 tmp;
7988 TCGv_i32 tmp2;
7989 TCGv_i32 tmp3;
7990 TCGv_i32 addr;
a7812ae4 7991 TCGv_i64 tmp64;
9ee6e8bb 7992
9ee6e8bb 7993 /* M variants do not implement ARM mode. */
b53d8923 7994 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 7995 goto illegal_op;
b53d8923 7996 }
9ee6e8bb
PB
7997 cond = insn >> 28;
7998 if (cond == 0xf){
be5e7a76
DES
7999 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
8000 * choose to UNDEF. In ARMv5 and above the space is used
8001 * for miscellaneous unconditional instructions.
8002 */
8003 ARCH(5);
8004
9ee6e8bb
PB
8005 /* Unconditional instructions. */
8006 if (((insn >> 25) & 7) == 1) {
8007 /* NEON Data processing. */
d614a513 8008 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8009 goto illegal_op;
d614a513 8010 }
9ee6e8bb 8011
7dcc1f89 8012 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 8013 goto illegal_op;
7dcc1f89 8014 }
9ee6e8bb
PB
8015 return;
8016 }
8017 if ((insn & 0x0f100000) == 0x04000000) {
8018 /* NEON load/store. */
d614a513 8019 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 8020 goto illegal_op;
d614a513 8021 }
9ee6e8bb 8022
7dcc1f89 8023 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 8024 goto illegal_op;
7dcc1f89 8025 }
9ee6e8bb
PB
8026 return;
8027 }
6a57f3eb
WN
8028 if ((insn & 0x0f000e10) == 0x0e000a00) {
8029 /* VFP. */
7dcc1f89 8030 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
8031 goto illegal_op;
8032 }
8033 return;
8034 }
3d185e5d
PM
8035 if (((insn & 0x0f30f000) == 0x0510f000) ||
8036 ((insn & 0x0f30f010) == 0x0710f000)) {
8037 if ((insn & (1 << 22)) == 0) {
8038 /* PLDW; v7MP */
d614a513 8039 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8040 goto illegal_op;
8041 }
8042 }
8043 /* Otherwise PLD; v5TE+ */
be5e7a76 8044 ARCH(5TE);
3d185e5d
PM
8045 return;
8046 }
8047 if (((insn & 0x0f70f000) == 0x0450f000) ||
8048 ((insn & 0x0f70f010) == 0x0650f000)) {
8049 ARCH(7);
8050 return; /* PLI; V7 */
8051 }
8052 if (((insn & 0x0f700000) == 0x04100000) ||
8053 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 8054 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
8055 goto illegal_op;
8056 }
8057 return; /* v7MP: Unallocated memory hint: must NOP */
8058 }
8059
8060 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
8061 ARCH(6);
8062 /* setend */
9886ecdf
PB
8063 if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
8064 gen_helper_setend(cpu_env);
8065 s->is_jmp = DISAS_UPDATE;
9ee6e8bb
PB
8066 }
8067 return;
8068 } else if ((insn & 0x0fffff00) == 0x057ff000) {
8069 switch ((insn >> 4) & 0xf) {
8070 case 1: /* clrex */
8071 ARCH(6K);
426f5abc 8072 gen_clrex(s);
9ee6e8bb
PB
8073 return;
8074 case 4: /* dsb */
8075 case 5: /* dmb */
9ee6e8bb 8076 ARCH(7);
61e4c432 8077 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 8078 return;
6df99dec
SS
8079 case 6: /* isb */
8080 /* We need to break the TB after this insn to execute
8081 * self-modifying code correctly and also to take
8082 * any pending interrupts immediately.
8083 */
8084 gen_lookup_tb(s);
8085 return;
9ee6e8bb
PB
8086 default:
8087 goto illegal_op;
8088 }
8089 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
8090 /* srs */
81465888
PM
8091 ARCH(6);
8092 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 8093 return;
ea825eee 8094 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 8095 /* rfe */
c67b6b71 8096 int32_t offset;
9ee6e8bb
PB
8097 if (IS_USER(s))
8098 goto illegal_op;
8099 ARCH(6);
8100 rn = (insn >> 16) & 0xf;
b0109805 8101 addr = load_reg(s, rn);
9ee6e8bb
PB
8102 i = (insn >> 23) & 3;
8103 switch (i) {
b0109805 8104 case 0: offset = -4; break; /* DA */
c67b6b71
FN
8105 case 1: offset = 0; break; /* IA */
8106 case 2: offset = -8; break; /* DB */
b0109805 8107 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
8108 default: abort();
8109 }
8110 if (offset)
b0109805
PB
8111 tcg_gen_addi_i32(addr, addr, offset);
8112 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 8113 tmp = tcg_temp_new_i32();
12dcc321 8114 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 8115 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8116 tmp2 = tcg_temp_new_i32();
12dcc321 8117 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
8118 if (insn & (1 << 21)) {
8119 /* Base writeback. */
8120 switch (i) {
b0109805 8121 case 0: offset = -8; break;
c67b6b71
FN
8122 case 1: offset = 4; break;
8123 case 2: offset = -4; break;
b0109805 8124 case 3: offset = 0; break;
9ee6e8bb
PB
8125 default: abort();
8126 }
8127 if (offset)
b0109805
PB
8128 tcg_gen_addi_i32(addr, addr, offset);
8129 store_reg(s, rn, addr);
8130 } else {
7d1b0095 8131 tcg_temp_free_i32(addr);
9ee6e8bb 8132 }
b0109805 8133 gen_rfe(s, tmp, tmp2);
c67b6b71 8134 return;
9ee6e8bb
PB
8135 } else if ((insn & 0x0e000000) == 0x0a000000) {
8136 /* branch link and change to thumb (blx <offset>) */
8137 int32_t offset;
8138
8139 val = (uint32_t)s->pc;
7d1b0095 8140 tmp = tcg_temp_new_i32();
d9ba4830
PB
8141 tcg_gen_movi_i32(tmp, val);
8142 store_reg(s, 14, tmp);
9ee6e8bb
PB
8143 /* Sign-extend the 24-bit offset */
8144 offset = (((int32_t)insn) << 8) >> 8;
8145 /* offset * 4 + bit24 * 2 + (thumb bit) */
8146 val += (offset << 2) | ((insn >> 23) & 2) | 1;
8147 /* pipeline offset */
8148 val += 4;
be5e7a76 8149 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 8150 gen_bx_im(s, val);
9ee6e8bb
PB
8151 return;
8152 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 8153 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 8154 /* iWMMXt register transfer. */
c0f4af17 8155 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 8156 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 8157 return;
c0f4af17
PM
8158 }
8159 }
9ee6e8bb
PB
8160 }
8161 } else if ((insn & 0x0fe00000) == 0x0c400000) {
8162 /* Coprocessor double register transfer. */
be5e7a76 8163 ARCH(5TE);
9ee6e8bb
PB
8164 } else if ((insn & 0x0f000010) == 0x0e000010) {
8165 /* Additional coprocessor register transfer. */
7997d92f 8166 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
8167 uint32_t mask;
8168 uint32_t val;
8169 /* cps (privileged) */
8170 if (IS_USER(s))
8171 return;
8172 mask = val = 0;
8173 if (insn & (1 << 19)) {
8174 if (insn & (1 << 8))
8175 mask |= CPSR_A;
8176 if (insn & (1 << 7))
8177 mask |= CPSR_I;
8178 if (insn & (1 << 6))
8179 mask |= CPSR_F;
8180 if (insn & (1 << 18))
8181 val |= mask;
8182 }
7997d92f 8183 if (insn & (1 << 17)) {
9ee6e8bb
PB
8184 mask |= CPSR_M;
8185 val |= (insn & 0x1f);
8186 }
8187 if (mask) {
2fbac54b 8188 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
8189 }
8190 return;
8191 }
8192 goto illegal_op;
8193 }
8194 if (cond != 0xe) {
8195 /* if not always execute, we generate a conditional jump to
8196 next instruction */
8197 s->condlabel = gen_new_label();
39fb730a 8198 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8199 s->condjmp = 1;
8200 }
8201 if ((insn & 0x0f900000) == 0x03000000) {
8202 if ((insn & (1 << 21)) == 0) {
8203 ARCH(6T2);
8204 rd = (insn >> 12) & 0xf;
8205 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8206 if ((insn & (1 << 22)) == 0) {
8207 /* MOVW */
7d1b0095 8208 tmp = tcg_temp_new_i32();
5e3f878a 8209 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
8210 } else {
8211 /* MOVT */
5e3f878a 8212 tmp = load_reg(s, rd);
86831435 8213 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8214 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 8215 }
5e3f878a 8216 store_reg(s, rd, tmp);
9ee6e8bb
PB
8217 } else {
8218 if (((insn >> 12) & 0xf) != 0xf)
8219 goto illegal_op;
8220 if (((insn >> 16) & 0xf) == 0) {
8221 gen_nop_hint(s, insn & 0xff);
8222 } else {
8223 /* CPSR = immediate */
8224 val = insn & 0xff;
8225 shift = ((insn >> 8) & 0xf) * 2;
8226 if (shift)
8227 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 8228 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
8229 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
8230 i, val)) {
9ee6e8bb 8231 goto illegal_op;
7dcc1f89 8232 }
9ee6e8bb
PB
8233 }
8234 }
8235 } else if ((insn & 0x0f900000) == 0x01000000
8236 && (insn & 0x00000090) != 0x00000090) {
8237 /* miscellaneous instructions */
8238 op1 = (insn >> 21) & 3;
8239 sh = (insn >> 4) & 0xf;
8240 rm = insn & 0xf;
8241 switch (sh) {
8bfd0550
PM
8242 case 0x0: /* MSR, MRS */
8243 if (insn & (1 << 9)) {
8244 /* MSR (banked) and MRS (banked) */
8245 int sysm = extract32(insn, 16, 4) |
8246 (extract32(insn, 8, 1) << 4);
8247 int r = extract32(insn, 22, 1);
8248
8249 if (op1 & 1) {
8250 /* MSR (banked) */
8251 gen_msr_banked(s, r, sysm, rm);
8252 } else {
8253 /* MRS (banked) */
8254 int rd = extract32(insn, 12, 4);
8255
8256 gen_mrs_banked(s, r, sysm, rd);
8257 }
8258 break;
8259 }
8260
8261 /* MSR, MRS (for PSRs) */
9ee6e8bb
PB
8262 if (op1 & 1) {
8263 /* PSR = reg */
2fbac54b 8264 tmp = load_reg(s, rm);
9ee6e8bb 8265 i = ((op1 & 2) != 0);
7dcc1f89 8266 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
8267 goto illegal_op;
8268 } else {
8269 /* reg = PSR */
8270 rd = (insn >> 12) & 0xf;
8271 if (op1 & 2) {
8272 if (IS_USER(s))
8273 goto illegal_op;
d9ba4830 8274 tmp = load_cpu_field(spsr);
9ee6e8bb 8275 } else {
7d1b0095 8276 tmp = tcg_temp_new_i32();
9ef39277 8277 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8278 }
d9ba4830 8279 store_reg(s, rd, tmp);
9ee6e8bb
PB
8280 }
8281 break;
8282 case 0x1:
8283 if (op1 == 1) {
8284 /* branch/exchange thumb (bx). */
be5e7a76 8285 ARCH(4T);
d9ba4830
PB
8286 tmp = load_reg(s, rm);
8287 gen_bx(s, tmp);
9ee6e8bb
PB
8288 } else if (op1 == 3) {
8289 /* clz */
be5e7a76 8290 ARCH(5);
9ee6e8bb 8291 rd = (insn >> 12) & 0xf;
1497c961 8292 tmp = load_reg(s, rm);
7539a012 8293 tcg_gen_clzi_i32(tmp, tmp, 32);
1497c961 8294 store_reg(s, rd, tmp);
9ee6e8bb
PB
8295 } else {
8296 goto illegal_op;
8297 }
8298 break;
8299 case 0x2:
8300 if (op1 == 1) {
8301 ARCH(5J); /* bxj */
8302 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8303 tmp = load_reg(s, rm);
8304 gen_bx(s, tmp);
9ee6e8bb
PB
8305 } else {
8306 goto illegal_op;
8307 }
8308 break;
8309 case 0x3:
8310 if (op1 != 1)
8311 goto illegal_op;
8312
be5e7a76 8313 ARCH(5);
9ee6e8bb 8314 /* branch link/exchange thumb (blx) */
d9ba4830 8315 tmp = load_reg(s, rm);
7d1b0095 8316 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
8317 tcg_gen_movi_i32(tmp2, s->pc);
8318 store_reg(s, 14, tmp2);
8319 gen_bx(s, tmp);
9ee6e8bb 8320 break;
eb0ecd5a
WN
8321 case 0x4:
8322 {
8323 /* crc32/crc32c */
8324 uint32_t c = extract32(insn, 8, 4);
8325
8326 /* Check this CPU supports ARMv8 CRC instructions.
8327 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
8328 * Bits 8, 10 and 11 should be zero.
8329 */
d614a513 8330 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
8331 (c & 0xd) != 0) {
8332 goto illegal_op;
8333 }
8334
8335 rn = extract32(insn, 16, 4);
8336 rd = extract32(insn, 12, 4);
8337
8338 tmp = load_reg(s, rn);
8339 tmp2 = load_reg(s, rm);
aa633469
PM
8340 if (op1 == 0) {
8341 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
8342 } else if (op1 == 1) {
8343 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
8344 }
eb0ecd5a
WN
8345 tmp3 = tcg_const_i32(1 << op1);
8346 if (c & 0x2) {
8347 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
8348 } else {
8349 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
8350 }
8351 tcg_temp_free_i32(tmp2);
8352 tcg_temp_free_i32(tmp3);
8353 store_reg(s, rd, tmp);
8354 break;
8355 }
9ee6e8bb 8356 case 0x5: /* saturating add/subtract */
be5e7a76 8357 ARCH(5TE);
9ee6e8bb
PB
8358 rd = (insn >> 12) & 0xf;
8359 rn = (insn >> 16) & 0xf;
b40d0353 8360 tmp = load_reg(s, rm);
5e3f878a 8361 tmp2 = load_reg(s, rn);
9ee6e8bb 8362 if (op1 & 2)
9ef39277 8363 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 8364 if (op1 & 1)
9ef39277 8365 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8366 else
9ef39277 8367 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8368 tcg_temp_free_i32(tmp2);
5e3f878a 8369 store_reg(s, rd, tmp);
9ee6e8bb 8370 break;
49e14940 8371 case 7:
d4a2dc67
PM
8372 {
8373 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e 8374 switch (op1) {
19a6e31c
PM
8375 case 0:
8376 /* HLT */
8377 gen_hlt(s, imm16);
8378 break;
37e6456e
PM
8379 case 1:
8380 /* bkpt */
8381 ARCH(5);
8382 gen_exception_insn(s, 4, EXCP_BKPT,
73710361
GB
8383 syn_aa32_bkpt(imm16, false),
8384 default_exception_el(s));
37e6456e
PM
8385 break;
8386 case 2:
8387 /* Hypervisor call (v7) */
8388 ARCH(7);
8389 if (IS_USER(s)) {
8390 goto illegal_op;
8391 }
8392 gen_hvc(s, imm16);
8393 break;
8394 case 3:
8395 /* Secure monitor call (v6+) */
8396 ARCH(6K);
8397 if (IS_USER(s)) {
8398 goto illegal_op;
8399 }
8400 gen_smc(s);
8401 break;
8402 default:
19a6e31c 8403 g_assert_not_reached();
49e14940 8404 }
9ee6e8bb 8405 break;
d4a2dc67 8406 }
9ee6e8bb
PB
8407 case 0x8: /* signed multiply */
8408 case 0xa:
8409 case 0xc:
8410 case 0xe:
be5e7a76 8411 ARCH(5TE);
9ee6e8bb
PB
8412 rs = (insn >> 8) & 0xf;
8413 rn = (insn >> 12) & 0xf;
8414 rd = (insn >> 16) & 0xf;
8415 if (op1 == 1) {
8416 /* (32 * 16) >> 16 */
5e3f878a
PB
8417 tmp = load_reg(s, rm);
8418 tmp2 = load_reg(s, rs);
9ee6e8bb 8419 if (sh & 4)
5e3f878a 8420 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8421 else
5e3f878a 8422 gen_sxth(tmp2);
a7812ae4
PB
8423 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8424 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8425 tmp = tcg_temp_new_i32();
ecc7b3aa 8426 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 8427 tcg_temp_free_i64(tmp64);
9ee6e8bb 8428 if ((sh & 2) == 0) {
5e3f878a 8429 tmp2 = load_reg(s, rn);
9ef39277 8430 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8431 tcg_temp_free_i32(tmp2);
9ee6e8bb 8432 }
5e3f878a 8433 store_reg(s, rd, tmp);
9ee6e8bb
PB
8434 } else {
8435 /* 16 * 16 */
5e3f878a
PB
8436 tmp = load_reg(s, rm);
8437 tmp2 = load_reg(s, rs);
8438 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8439 tcg_temp_free_i32(tmp2);
9ee6e8bb 8440 if (op1 == 2) {
a7812ae4
PB
8441 tmp64 = tcg_temp_new_i64();
8442 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8443 tcg_temp_free_i32(tmp);
a7812ae4
PB
8444 gen_addq(s, tmp64, rn, rd);
8445 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8446 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8447 } else {
8448 if (op1 == 0) {
5e3f878a 8449 tmp2 = load_reg(s, rn);
9ef39277 8450 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8451 tcg_temp_free_i32(tmp2);
9ee6e8bb 8452 }
5e3f878a 8453 store_reg(s, rd, tmp);
9ee6e8bb
PB
8454 }
8455 }
8456 break;
8457 default:
8458 goto illegal_op;
8459 }
8460 } else if (((insn & 0x0e000000) == 0 &&
8461 (insn & 0x00000090) != 0x90) ||
8462 ((insn & 0x0e000000) == (1 << 25))) {
8463 int set_cc, logic_cc, shiftop;
8464
8465 op1 = (insn >> 21) & 0xf;
8466 set_cc = (insn >> 20) & 1;
8467 logic_cc = table_logic_cc[op1] & set_cc;
8468
8469 /* data processing instruction */
8470 if (insn & (1 << 25)) {
8471 /* immediate operand */
8472 val = insn & 0xff;
8473 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8474 if (shift) {
9ee6e8bb 8475 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8476 }
7d1b0095 8477 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8478 tcg_gen_movi_i32(tmp2, val);
8479 if (logic_cc && shift) {
8480 gen_set_CF_bit31(tmp2);
8481 }
9ee6e8bb
PB
8482 } else {
8483 /* register */
8484 rm = (insn) & 0xf;
e9bb4aa9 8485 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8486 shiftop = (insn >> 5) & 3;
8487 if (!(insn & (1 << 4))) {
8488 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8489 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8490 } else {
8491 rs = (insn >> 8) & 0xf;
8984bd2e 8492 tmp = load_reg(s, rs);
e9bb4aa9 8493 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8494 }
8495 }
8496 if (op1 != 0x0f && op1 != 0x0d) {
8497 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8498 tmp = load_reg(s, rn);
8499 } else {
39d5492a 8500 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8501 }
8502 rd = (insn >> 12) & 0xf;
8503 switch(op1) {
8504 case 0x00:
e9bb4aa9
JR
8505 tcg_gen_and_i32(tmp, tmp, tmp2);
8506 if (logic_cc) {
8507 gen_logic_CC(tmp);
8508 }
7dcc1f89 8509 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8510 break;
8511 case 0x01:
e9bb4aa9
JR
8512 tcg_gen_xor_i32(tmp, tmp, tmp2);
8513 if (logic_cc) {
8514 gen_logic_CC(tmp);
8515 }
7dcc1f89 8516 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8517 break;
8518 case 0x02:
8519 if (set_cc && rd == 15) {
8520 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8521 if (IS_USER(s)) {
9ee6e8bb 8522 goto illegal_op;
e9bb4aa9 8523 }
72485ec4 8524 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8525 gen_exception_return(s, tmp);
9ee6e8bb 8526 } else {
e9bb4aa9 8527 if (set_cc) {
72485ec4 8528 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8529 } else {
8530 tcg_gen_sub_i32(tmp, tmp, tmp2);
8531 }
7dcc1f89 8532 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8533 }
8534 break;
8535 case 0x03:
e9bb4aa9 8536 if (set_cc) {
72485ec4 8537 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8538 } else {
8539 tcg_gen_sub_i32(tmp, tmp2, tmp);
8540 }
7dcc1f89 8541 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8542 break;
8543 case 0x04:
e9bb4aa9 8544 if (set_cc) {
72485ec4 8545 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8546 } else {
8547 tcg_gen_add_i32(tmp, tmp, tmp2);
8548 }
7dcc1f89 8549 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8550 break;
8551 case 0x05:
e9bb4aa9 8552 if (set_cc) {
49b4c31e 8553 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8554 } else {
8555 gen_add_carry(tmp, tmp, tmp2);
8556 }
7dcc1f89 8557 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8558 break;
8559 case 0x06:
e9bb4aa9 8560 if (set_cc) {
2de68a49 8561 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8562 } else {
8563 gen_sub_carry(tmp, tmp, tmp2);
8564 }
7dcc1f89 8565 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8566 break;
8567 case 0x07:
e9bb4aa9 8568 if (set_cc) {
2de68a49 8569 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8570 } else {
8571 gen_sub_carry(tmp, tmp2, tmp);
8572 }
7dcc1f89 8573 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8574 break;
8575 case 0x08:
8576 if (set_cc) {
e9bb4aa9
JR
8577 tcg_gen_and_i32(tmp, tmp, tmp2);
8578 gen_logic_CC(tmp);
9ee6e8bb 8579 }
7d1b0095 8580 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8581 break;
8582 case 0x09:
8583 if (set_cc) {
e9bb4aa9
JR
8584 tcg_gen_xor_i32(tmp, tmp, tmp2);
8585 gen_logic_CC(tmp);
9ee6e8bb 8586 }
7d1b0095 8587 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8588 break;
8589 case 0x0a:
8590 if (set_cc) {
72485ec4 8591 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8592 }
7d1b0095 8593 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8594 break;
8595 case 0x0b:
8596 if (set_cc) {
72485ec4 8597 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8598 }
7d1b0095 8599 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8600 break;
8601 case 0x0c:
e9bb4aa9
JR
8602 tcg_gen_or_i32(tmp, tmp, tmp2);
8603 if (logic_cc) {
8604 gen_logic_CC(tmp);
8605 }
7dcc1f89 8606 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8607 break;
8608 case 0x0d:
8609 if (logic_cc && rd == 15) {
8610 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8611 if (IS_USER(s)) {
9ee6e8bb 8612 goto illegal_op;
e9bb4aa9
JR
8613 }
8614 gen_exception_return(s, tmp2);
9ee6e8bb 8615 } else {
e9bb4aa9
JR
8616 if (logic_cc) {
8617 gen_logic_CC(tmp2);
8618 }
7dcc1f89 8619 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8620 }
8621 break;
8622 case 0x0e:
f669df27 8623 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8624 if (logic_cc) {
8625 gen_logic_CC(tmp);
8626 }
7dcc1f89 8627 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8628 break;
8629 default:
8630 case 0x0f:
e9bb4aa9
JR
8631 tcg_gen_not_i32(tmp2, tmp2);
8632 if (logic_cc) {
8633 gen_logic_CC(tmp2);
8634 }
7dcc1f89 8635 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8636 break;
8637 }
e9bb4aa9 8638 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8639 tcg_temp_free_i32(tmp2);
e9bb4aa9 8640 }
9ee6e8bb
PB
8641 } else {
8642 /* other instructions */
8643 op1 = (insn >> 24) & 0xf;
8644 switch(op1) {
8645 case 0x0:
8646 case 0x1:
8647 /* multiplies, extra load/stores */
8648 sh = (insn >> 5) & 3;
8649 if (sh == 0) {
8650 if (op1 == 0x0) {
8651 rd = (insn >> 16) & 0xf;
8652 rn = (insn >> 12) & 0xf;
8653 rs = (insn >> 8) & 0xf;
8654 rm = (insn) & 0xf;
8655 op1 = (insn >> 20) & 0xf;
8656 switch (op1) {
8657 case 0: case 1: case 2: case 3: case 6:
8658 /* 32 bit mul */
5e3f878a
PB
8659 tmp = load_reg(s, rs);
8660 tmp2 = load_reg(s, rm);
8661 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8662 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8663 if (insn & (1 << 22)) {
8664 /* Subtract (mls) */
8665 ARCH(6T2);
5e3f878a
PB
8666 tmp2 = load_reg(s, rn);
8667 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8668 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8669 } else if (insn & (1 << 21)) {
8670 /* Add */
5e3f878a
PB
8671 tmp2 = load_reg(s, rn);
8672 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8673 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8674 }
8675 if (insn & (1 << 20))
5e3f878a
PB
8676 gen_logic_CC(tmp);
8677 store_reg(s, rd, tmp);
9ee6e8bb 8678 break;
8aac08b1
AJ
8679 case 4:
8680 /* 64 bit mul double accumulate (UMAAL) */
8681 ARCH(6);
8682 tmp = load_reg(s, rs);
8683 tmp2 = load_reg(s, rm);
8684 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8685 gen_addq_lo(s, tmp64, rn);
8686 gen_addq_lo(s, tmp64, rd);
8687 gen_storeq_reg(s, rn, rd, tmp64);
8688 tcg_temp_free_i64(tmp64);
8689 break;
8690 case 8: case 9: case 10: case 11:
8691 case 12: case 13: case 14: case 15:
8692 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8693 tmp = load_reg(s, rs);
8694 tmp2 = load_reg(s, rm);
8aac08b1 8695 if (insn & (1 << 22)) {
c9f10124 8696 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8697 } else {
c9f10124 8698 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8699 }
8700 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8701 TCGv_i32 al = load_reg(s, rn);
8702 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8703 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8704 tcg_temp_free_i32(al);
8705 tcg_temp_free_i32(ah);
9ee6e8bb 8706 }
8aac08b1 8707 if (insn & (1 << 20)) {
c9f10124 8708 gen_logicq_cc(tmp, tmp2);
8aac08b1 8709 }
c9f10124
RH
8710 store_reg(s, rn, tmp);
8711 store_reg(s, rd, tmp2);
9ee6e8bb 8712 break;
8aac08b1
AJ
8713 default:
8714 goto illegal_op;
9ee6e8bb
PB
8715 }
8716 } else {
8717 rn = (insn >> 16) & 0xf;
8718 rd = (insn >> 12) & 0xf;
8719 if (insn & (1 << 23)) {
8720 /* load/store exclusive */
2359bf80 8721 int op2 = (insn >> 8) & 3;
86753403 8722 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8723
8724 switch (op2) {
8725 case 0: /* lda/stl */
8726 if (op1 == 1) {
8727 goto illegal_op;
8728 }
8729 ARCH(8);
8730 break;
8731 case 1: /* reserved */
8732 goto illegal_op;
8733 case 2: /* ldaex/stlex */
8734 ARCH(8);
8735 break;
8736 case 3: /* ldrex/strex */
8737 if (op1) {
8738 ARCH(6K);
8739 } else {
8740 ARCH(6);
8741 }
8742 break;
8743 }
8744
3174f8e9 8745 addr = tcg_temp_local_new_i32();
98a46317 8746 load_reg_var(s, addr, rn);
2359bf80
MR
8747
8748 /* Since the emulation does not have barriers,
8749 the acquire/release semantics need no special
8750 handling */
8751 if (op2 == 0) {
8752 if (insn & (1 << 20)) {
8753 tmp = tcg_temp_new_i32();
8754 switch (op1) {
8755 case 0: /* lda */
9bb6558a
PM
8756 gen_aa32_ld32u_iss(s, tmp, addr,
8757 get_mem_index(s),
8758 rd | ISSIsAcqRel);
2359bf80
MR
8759 break;
8760 case 2: /* ldab */
9bb6558a
PM
8761 gen_aa32_ld8u_iss(s, tmp, addr,
8762 get_mem_index(s),
8763 rd | ISSIsAcqRel);
2359bf80
MR
8764 break;
8765 case 3: /* ldah */
9bb6558a
PM
8766 gen_aa32_ld16u_iss(s, tmp, addr,
8767 get_mem_index(s),
8768 rd | ISSIsAcqRel);
2359bf80
MR
8769 break;
8770 default:
8771 abort();
8772 }
8773 store_reg(s, rd, tmp);
8774 } else {
8775 rm = insn & 0xf;
8776 tmp = load_reg(s, rm);
8777 switch (op1) {
8778 case 0: /* stl */
9bb6558a
PM
8779 gen_aa32_st32_iss(s, tmp, addr,
8780 get_mem_index(s),
8781 rm | ISSIsAcqRel);
2359bf80
MR
8782 break;
8783 case 2: /* stlb */
9bb6558a
PM
8784 gen_aa32_st8_iss(s, tmp, addr,
8785 get_mem_index(s),
8786 rm | ISSIsAcqRel);
2359bf80
MR
8787 break;
8788 case 3: /* stlh */
9bb6558a
PM
8789 gen_aa32_st16_iss(s, tmp, addr,
8790 get_mem_index(s),
8791 rm | ISSIsAcqRel);
2359bf80
MR
8792 break;
8793 default:
8794 abort();
8795 }
8796 tcg_temp_free_i32(tmp);
8797 }
8798 } else if (insn & (1 << 20)) {
86753403
PB
8799 switch (op1) {
8800 case 0: /* ldrex */
426f5abc 8801 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8802 break;
8803 case 1: /* ldrexd */
426f5abc 8804 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8805 break;
8806 case 2: /* ldrexb */
426f5abc 8807 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8808 break;
8809 case 3: /* ldrexh */
426f5abc 8810 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8811 break;
8812 default:
8813 abort();
8814 }
9ee6e8bb
PB
8815 } else {
8816 rm = insn & 0xf;
86753403
PB
8817 switch (op1) {
8818 case 0: /* strex */
426f5abc 8819 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8820 break;
8821 case 1: /* strexd */
502e64fe 8822 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8823 break;
8824 case 2: /* strexb */
426f5abc 8825 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8826 break;
8827 case 3: /* strexh */
426f5abc 8828 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8829 break;
8830 default:
8831 abort();
8832 }
9ee6e8bb 8833 }
39d5492a 8834 tcg_temp_free_i32(addr);
9ee6e8bb 8835 } else {
cf12bce0
EC
8836 TCGv taddr;
8837 TCGMemOp opc = s->be_data;
8838
9ee6e8bb
PB
8839 /* SWP instruction */
8840 rm = (insn) & 0xf;
8841
9ee6e8bb 8842 if (insn & (1 << 22)) {
cf12bce0 8843 opc |= MO_UB;
9ee6e8bb 8844 } else {
cf12bce0 8845 opc |= MO_UL | MO_ALIGN;
9ee6e8bb 8846 }
cf12bce0
EC
8847
8848 addr = load_reg(s, rn);
8849 taddr = gen_aa32_addr(s, addr, opc);
7d1b0095 8850 tcg_temp_free_i32(addr);
cf12bce0
EC
8851
8852 tmp = load_reg(s, rm);
8853 tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
8854 get_mem_index(s), opc);
8855 tcg_temp_free(taddr);
8856 store_reg(s, rd, tmp);
9ee6e8bb
PB
8857 }
8858 }
8859 } else {
8860 int address_offset;
3960c336 8861 bool load = insn & (1 << 20);
63f26fcf
PM
8862 bool wbit = insn & (1 << 21);
8863 bool pbit = insn & (1 << 24);
3960c336 8864 bool doubleword = false;
9bb6558a
PM
8865 ISSInfo issinfo;
8866
9ee6e8bb
PB
8867 /* Misc load/store */
8868 rn = (insn >> 16) & 0xf;
8869 rd = (insn >> 12) & 0xf;
3960c336 8870
9bb6558a
PM
8871 /* ISS not valid if writeback */
8872 issinfo = (pbit & !wbit) ? rd : ISSInvalid;
8873
3960c336
PM
8874 if (!load && (sh & 2)) {
8875 /* doubleword */
8876 ARCH(5TE);
8877 if (rd & 1) {
8878 /* UNPREDICTABLE; we choose to UNDEF */
8879 goto illegal_op;
8880 }
8881 load = (sh & 1) == 0;
8882 doubleword = true;
8883 }
8884
b0109805 8885 addr = load_reg(s, rn);
63f26fcf 8886 if (pbit) {
b0109805 8887 gen_add_datah_offset(s, insn, 0, addr);
63f26fcf 8888 }
9ee6e8bb 8889 address_offset = 0;
3960c336
PM
8890
8891 if (doubleword) {
8892 if (!load) {
9ee6e8bb 8893 /* store */
b0109805 8894 tmp = load_reg(s, rd);
12dcc321 8895 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8896 tcg_temp_free_i32(tmp);
b0109805
PB
8897 tcg_gen_addi_i32(addr, addr, 4);
8898 tmp = load_reg(s, rd + 1);
12dcc321 8899 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 8900 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8901 } else {
8902 /* load */
5a839c0d 8903 tmp = tcg_temp_new_i32();
12dcc321 8904 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
8905 store_reg(s, rd, tmp);
8906 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8907 tmp = tcg_temp_new_i32();
12dcc321 8908 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 8909 rd++;
9ee6e8bb
PB
8910 }
8911 address_offset = -4;
3960c336
PM
8912 } else if (load) {
8913 /* load */
8914 tmp = tcg_temp_new_i32();
8915 switch (sh) {
8916 case 1:
9bb6558a
PM
8917 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
8918 issinfo);
3960c336
PM
8919 break;
8920 case 2:
9bb6558a
PM
8921 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s),
8922 issinfo);
3960c336
PM
8923 break;
8924 default:
8925 case 3:
9bb6558a
PM
8926 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s),
8927 issinfo);
3960c336
PM
8928 break;
8929 }
9ee6e8bb
PB
8930 } else {
8931 /* store */
b0109805 8932 tmp = load_reg(s, rd);
9bb6558a 8933 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), issinfo);
5a839c0d 8934 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8935 }
8936 /* Perform base writeback before the loaded value to
8937 ensure correct behavior with overlapping index registers.
b6af0975 8938 ldrd with base writeback is undefined if the
9ee6e8bb 8939 destination and index registers overlap. */
63f26fcf 8940 if (!pbit) {
b0109805
PB
8941 gen_add_datah_offset(s, insn, address_offset, addr);
8942 store_reg(s, rn, addr);
63f26fcf 8943 } else if (wbit) {
9ee6e8bb 8944 if (address_offset)
b0109805
PB
8945 tcg_gen_addi_i32(addr, addr, address_offset);
8946 store_reg(s, rn, addr);
8947 } else {
7d1b0095 8948 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8949 }
8950 if (load) {
8951 /* Complete the load. */
b0109805 8952 store_reg(s, rd, tmp);
9ee6e8bb
PB
8953 }
8954 }
8955 break;
8956 case 0x4:
8957 case 0x5:
8958 goto do_ldst;
8959 case 0x6:
8960 case 0x7:
8961 if (insn & (1 << 4)) {
8962 ARCH(6);
8963 /* Armv6 Media instructions. */
8964 rm = insn & 0xf;
8965 rn = (insn >> 16) & 0xf;
2c0262af 8966 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8967 rs = (insn >> 8) & 0xf;
8968 switch ((insn >> 23) & 3) {
8969 case 0: /* Parallel add/subtract. */
8970 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8971 tmp = load_reg(s, rn);
8972 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8973 sh = (insn >> 5) & 7;
8974 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8975 goto illegal_op;
6ddbc6e4 8976 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8977 tcg_temp_free_i32(tmp2);
6ddbc6e4 8978 store_reg(s, rd, tmp);
9ee6e8bb
PB
8979 break;
8980 case 1:
8981 if ((insn & 0x00700020) == 0) {
6c95676b 8982 /* Halfword pack. */
3670669c
PB
8983 tmp = load_reg(s, rn);
8984 tmp2 = load_reg(s, rm);
9ee6e8bb 8985 shift = (insn >> 7) & 0x1f;
3670669c
PB
8986 if (insn & (1 << 6)) {
8987 /* pkhtb */
22478e79
AZ
8988 if (shift == 0)
8989 shift = 31;
8990 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8991 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8992 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8993 } else {
8994 /* pkhbt */
22478e79
AZ
8995 if (shift)
8996 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8997 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8998 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8999 }
9000 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9001 tcg_temp_free_i32(tmp2);
3670669c 9002 store_reg(s, rd, tmp);
9ee6e8bb
PB
9003 } else if ((insn & 0x00200020) == 0x00200000) {
9004 /* [us]sat */
6ddbc6e4 9005 tmp = load_reg(s, rm);
9ee6e8bb
PB
9006 shift = (insn >> 7) & 0x1f;
9007 if (insn & (1 << 6)) {
9008 if (shift == 0)
9009 shift = 31;
6ddbc6e4 9010 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 9011 } else {
6ddbc6e4 9012 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
9013 }
9014 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9015 tmp2 = tcg_const_i32(sh);
9016 if (insn & (1 << 22))
9ef39277 9017 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 9018 else
9ef39277 9019 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 9020 tcg_temp_free_i32(tmp2);
6ddbc6e4 9021 store_reg(s, rd, tmp);
9ee6e8bb
PB
9022 } else if ((insn & 0x00300fe0) == 0x00200f20) {
9023 /* [us]sat16 */
6ddbc6e4 9024 tmp = load_reg(s, rm);
9ee6e8bb 9025 sh = (insn >> 16) & 0x1f;
40d3c433
CL
9026 tmp2 = tcg_const_i32(sh);
9027 if (insn & (1 << 22))
9ef39277 9028 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9029 else
9ef39277 9030 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 9031 tcg_temp_free_i32(tmp2);
6ddbc6e4 9032 store_reg(s, rd, tmp);
9ee6e8bb
PB
9033 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
9034 /* Select bytes. */
6ddbc6e4
PB
9035 tmp = load_reg(s, rn);
9036 tmp2 = load_reg(s, rm);
7d1b0095 9037 tmp3 = tcg_temp_new_i32();
0ecb72a5 9038 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 9039 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9040 tcg_temp_free_i32(tmp3);
9041 tcg_temp_free_i32(tmp2);
6ddbc6e4 9042 store_reg(s, rd, tmp);
9ee6e8bb 9043 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 9044 tmp = load_reg(s, rm);
9ee6e8bb 9045 shift = (insn >> 10) & 3;
1301f322 9046 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9047 rotate, a shift is sufficient. */
9048 if (shift != 0)
f669df27 9049 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9050 op1 = (insn >> 20) & 7;
9051 switch (op1) {
5e3f878a
PB
9052 case 0: gen_sxtb16(tmp); break;
9053 case 2: gen_sxtb(tmp); break;
9054 case 3: gen_sxth(tmp); break;
9055 case 4: gen_uxtb16(tmp); break;
9056 case 6: gen_uxtb(tmp); break;
9057 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
9058 default: goto illegal_op;
9059 }
9060 if (rn != 15) {
5e3f878a 9061 tmp2 = load_reg(s, rn);
9ee6e8bb 9062 if ((op1 & 3) == 0) {
5e3f878a 9063 gen_add16(tmp, tmp2);
9ee6e8bb 9064 } else {
5e3f878a 9065 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9066 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9067 }
9068 }
6c95676b 9069 store_reg(s, rd, tmp);
9ee6e8bb
PB
9070 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
9071 /* rev */
b0109805 9072 tmp = load_reg(s, rm);
9ee6e8bb
PB
9073 if (insn & (1 << 22)) {
9074 if (insn & (1 << 7)) {
b0109805 9075 gen_revsh(tmp);
9ee6e8bb
PB
9076 } else {
9077 ARCH(6T2);
b0109805 9078 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9079 }
9080 } else {
9081 if (insn & (1 << 7))
b0109805 9082 gen_rev16(tmp);
9ee6e8bb 9083 else
66896cb8 9084 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 9085 }
b0109805 9086 store_reg(s, rd, tmp);
9ee6e8bb
PB
9087 } else {
9088 goto illegal_op;
9089 }
9090 break;
9091 case 2: /* Multiplies (Type 3). */
41e9564d
PM
9092 switch ((insn >> 20) & 0x7) {
9093 case 5:
9094 if (((insn >> 6) ^ (insn >> 7)) & 1) {
9095 /* op2 not 00x or 11x : UNDEF */
9096 goto illegal_op;
9097 }
838fa72d
AJ
9098 /* Signed multiply most significant [accumulate].
9099 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
9100 tmp = load_reg(s, rm);
9101 tmp2 = load_reg(s, rs);
a7812ae4 9102 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 9103
955a7dd5 9104 if (rd != 15) {
838fa72d 9105 tmp = load_reg(s, rd);
9ee6e8bb 9106 if (insn & (1 << 6)) {
838fa72d 9107 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 9108 } else {
838fa72d 9109 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
9110 }
9111 }
838fa72d
AJ
9112 if (insn & (1 << 5)) {
9113 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9114 }
9115 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9116 tmp = tcg_temp_new_i32();
ecc7b3aa 9117 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 9118 tcg_temp_free_i64(tmp64);
955a7dd5 9119 store_reg(s, rn, tmp);
41e9564d
PM
9120 break;
9121 case 0:
9122 case 4:
9123 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
9124 if (insn & (1 << 7)) {
9125 goto illegal_op;
9126 }
9127 tmp = load_reg(s, rm);
9128 tmp2 = load_reg(s, rs);
9ee6e8bb 9129 if (insn & (1 << 5))
5e3f878a
PB
9130 gen_swap_half(tmp2);
9131 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9132 if (insn & (1 << 22)) {
5e3f878a 9133 /* smlald, smlsld */
33bbd75a
PC
9134 TCGv_i64 tmp64_2;
9135
a7812ae4 9136 tmp64 = tcg_temp_new_i64();
33bbd75a 9137 tmp64_2 = tcg_temp_new_i64();
a7812ae4 9138 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 9139 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 9140 tcg_temp_free_i32(tmp);
33bbd75a
PC
9141 tcg_temp_free_i32(tmp2);
9142 if (insn & (1 << 6)) {
9143 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
9144 } else {
9145 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
9146 }
9147 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
9148 gen_addq(s, tmp64, rd, rn);
9149 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 9150 tcg_temp_free_i64(tmp64);
9ee6e8bb 9151 } else {
5e3f878a 9152 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
9153 if (insn & (1 << 6)) {
9154 /* This subtraction cannot overflow. */
9155 tcg_gen_sub_i32(tmp, tmp, tmp2);
9156 } else {
9157 /* This addition cannot overflow 32 bits;
9158 * however it may overflow considered as a
9159 * signed operation, in which case we must set
9160 * the Q flag.
9161 */
9162 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9163 }
9164 tcg_temp_free_i32(tmp2);
22478e79 9165 if (rd != 15)
9ee6e8bb 9166 {
22478e79 9167 tmp2 = load_reg(s, rd);
9ef39277 9168 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9169 tcg_temp_free_i32(tmp2);
9ee6e8bb 9170 }
22478e79 9171 store_reg(s, rn, tmp);
9ee6e8bb 9172 }
41e9564d 9173 break;
b8b8ea05
PM
9174 case 1:
9175 case 3:
9176 /* SDIV, UDIV */
d614a513 9177 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
9178 goto illegal_op;
9179 }
9180 if (((insn >> 5) & 7) || (rd != 15)) {
9181 goto illegal_op;
9182 }
9183 tmp = load_reg(s, rm);
9184 tmp2 = load_reg(s, rs);
9185 if (insn & (1 << 21)) {
9186 gen_helper_udiv(tmp, tmp, tmp2);
9187 } else {
9188 gen_helper_sdiv(tmp, tmp, tmp2);
9189 }
9190 tcg_temp_free_i32(tmp2);
9191 store_reg(s, rn, tmp);
9192 break;
41e9564d
PM
9193 default:
9194 goto illegal_op;
9ee6e8bb
PB
9195 }
9196 break;
9197 case 3:
9198 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
9199 switch (op1) {
9200 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
9201 ARCH(6);
9202 tmp = load_reg(s, rm);
9203 tmp2 = load_reg(s, rs);
9204 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9205 tcg_temp_free_i32(tmp2);
ded9d295
AZ
9206 if (rd != 15) {
9207 tmp2 = load_reg(s, rd);
6ddbc6e4 9208 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9209 tcg_temp_free_i32(tmp2);
9ee6e8bb 9210 }
ded9d295 9211 store_reg(s, rn, tmp);
9ee6e8bb
PB
9212 break;
9213 case 0x20: case 0x24: case 0x28: case 0x2c:
9214 /* Bitfield insert/clear. */
9215 ARCH(6T2);
9216 shift = (insn >> 7) & 0x1f;
9217 i = (insn >> 16) & 0x1f;
45140a57
KB
9218 if (i < shift) {
9219 /* UNPREDICTABLE; we choose to UNDEF */
9220 goto illegal_op;
9221 }
9ee6e8bb
PB
9222 i = i + 1 - shift;
9223 if (rm == 15) {
7d1b0095 9224 tmp = tcg_temp_new_i32();
5e3f878a 9225 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 9226 } else {
5e3f878a 9227 tmp = load_reg(s, rm);
9ee6e8bb
PB
9228 }
9229 if (i != 32) {
5e3f878a 9230 tmp2 = load_reg(s, rd);
d593c48e 9231 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 9232 tcg_temp_free_i32(tmp2);
9ee6e8bb 9233 }
5e3f878a 9234 store_reg(s, rd, tmp);
9ee6e8bb
PB
9235 break;
9236 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
9237 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 9238 ARCH(6T2);
5e3f878a 9239 tmp = load_reg(s, rm);
9ee6e8bb
PB
9240 shift = (insn >> 7) & 0x1f;
9241 i = ((insn >> 16) & 0x1f) + 1;
9242 if (shift + i > 32)
9243 goto illegal_op;
9244 if (i < 32) {
9245 if (op1 & 0x20) {
59a71b4c 9246 tcg_gen_extract_i32(tmp, tmp, shift, i);
9ee6e8bb 9247 } else {
59a71b4c 9248 tcg_gen_sextract_i32(tmp, tmp, shift, i);
9ee6e8bb
PB
9249 }
9250 }
5e3f878a 9251 store_reg(s, rd, tmp);
9ee6e8bb
PB
9252 break;
9253 default:
9254 goto illegal_op;
9255 }
9256 break;
9257 }
9258 break;
9259 }
9260 do_ldst:
9261 /* Check for undefined extension instructions
9262 * per the ARM Bible IE:
9263 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
9264 */
9265 sh = (0xf << 20) | (0xf << 4);
9266 if (op1 == 0x7 && ((insn & sh) == sh))
9267 {
9268 goto illegal_op;
9269 }
9270 /* load/store byte/word */
9271 rn = (insn >> 16) & 0xf;
9272 rd = (insn >> 12) & 0xf;
b0109805 9273 tmp2 = load_reg(s, rn);
a99caa48
PM
9274 if ((insn & 0x01200000) == 0x00200000) {
9275 /* ldrt/strt */
579d21cc 9276 i = get_a32_user_mem_index(s);
a99caa48
PM
9277 } else {
9278 i = get_mem_index(s);
9279 }
9ee6e8bb 9280 if (insn & (1 << 24))
b0109805 9281 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
9282 if (insn & (1 << 20)) {
9283 /* load */
5a839c0d 9284 tmp = tcg_temp_new_i32();
9ee6e8bb 9285 if (insn & (1 << 22)) {
9bb6558a 9286 gen_aa32_ld8u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9287 } else {
9bb6558a 9288 gen_aa32_ld32u_iss(s, tmp, tmp2, i, rd);
9ee6e8bb 9289 }
9ee6e8bb
PB
9290 } else {
9291 /* store */
b0109805 9292 tmp = load_reg(s, rd);
5a839c0d 9293 if (insn & (1 << 22)) {
9bb6558a 9294 gen_aa32_st8_iss(s, tmp, tmp2, i, rd);
5a839c0d 9295 } else {
9bb6558a 9296 gen_aa32_st32_iss(s, tmp, tmp2, i, rd);
5a839c0d
PM
9297 }
9298 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9299 }
9300 if (!(insn & (1 << 24))) {
b0109805
PB
9301 gen_add_data_offset(s, insn, tmp2);
9302 store_reg(s, rn, tmp2);
9303 } else if (insn & (1 << 21)) {
9304 store_reg(s, rn, tmp2);
9305 } else {
7d1b0095 9306 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9307 }
9308 if (insn & (1 << 20)) {
9309 /* Complete the load. */
7dcc1f89 9310 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
9311 }
9312 break;
9313 case 0x08:
9314 case 0x09:
9315 {
da3e53dd
PM
9316 int j, n, loaded_base;
9317 bool exc_return = false;
9318 bool is_load = extract32(insn, 20, 1);
9319 bool user = false;
39d5492a 9320 TCGv_i32 loaded_var;
9ee6e8bb
PB
9321 /* load/store multiple words */
9322 /* XXX: store correct base if write back */
9ee6e8bb 9323 if (insn & (1 << 22)) {
da3e53dd 9324 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
9325 if (IS_USER(s))
9326 goto illegal_op; /* only usable in supervisor mode */
9327
da3e53dd
PM
9328 if (is_load && extract32(insn, 15, 1)) {
9329 exc_return = true;
9330 } else {
9331 user = true;
9332 }
9ee6e8bb
PB
9333 }
9334 rn = (insn >> 16) & 0xf;
b0109805 9335 addr = load_reg(s, rn);
9ee6e8bb
PB
9336
9337 /* compute total size */
9338 loaded_base = 0;
39d5492a 9339 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9340 n = 0;
9341 for(i=0;i<16;i++) {
9342 if (insn & (1 << i))
9343 n++;
9344 }
9345 /* XXX: test invalid n == 0 case ? */
9346 if (insn & (1 << 23)) {
9347 if (insn & (1 << 24)) {
9348 /* pre increment */
b0109805 9349 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9350 } else {
9351 /* post increment */
9352 }
9353 } else {
9354 if (insn & (1 << 24)) {
9355 /* pre decrement */
b0109805 9356 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9357 } else {
9358 /* post decrement */
9359 if (n != 1)
b0109805 9360 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9361 }
9362 }
9363 j = 0;
9364 for(i=0;i<16;i++) {
9365 if (insn & (1 << i)) {
da3e53dd 9366 if (is_load) {
9ee6e8bb 9367 /* load */
5a839c0d 9368 tmp = tcg_temp_new_i32();
12dcc321 9369 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
be5e7a76 9370 if (user) {
b75263d6 9371 tmp2 = tcg_const_i32(i);
1ce94f81 9372 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 9373 tcg_temp_free_i32(tmp2);
7d1b0095 9374 tcg_temp_free_i32(tmp);
9ee6e8bb 9375 } else if (i == rn) {
b0109805 9376 loaded_var = tmp;
9ee6e8bb 9377 loaded_base = 1;
fb0e8e79
PM
9378 } else if (rn == 15 && exc_return) {
9379 store_pc_exc_ret(s, tmp);
9ee6e8bb 9380 } else {
7dcc1f89 9381 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
9382 }
9383 } else {
9384 /* store */
9385 if (i == 15) {
9386 /* special case: r15 = PC + 8 */
9387 val = (long)s->pc + 4;
7d1b0095 9388 tmp = tcg_temp_new_i32();
b0109805 9389 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 9390 } else if (user) {
7d1b0095 9391 tmp = tcg_temp_new_i32();
b75263d6 9392 tmp2 = tcg_const_i32(i);
9ef39277 9393 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 9394 tcg_temp_free_i32(tmp2);
9ee6e8bb 9395 } else {
b0109805 9396 tmp = load_reg(s, i);
9ee6e8bb 9397 }
12dcc321 9398 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
5a839c0d 9399 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9400 }
9401 j++;
9402 /* no need to add after the last transfer */
9403 if (j != n)
b0109805 9404 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9405 }
9406 }
9407 if (insn & (1 << 21)) {
9408 /* write back */
9409 if (insn & (1 << 23)) {
9410 if (insn & (1 << 24)) {
9411 /* pre increment */
9412 } else {
9413 /* post increment */
b0109805 9414 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
9415 }
9416 } else {
9417 if (insn & (1 << 24)) {
9418 /* pre decrement */
9419 if (n != 1)
b0109805 9420 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
9421 } else {
9422 /* post decrement */
b0109805 9423 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
9424 }
9425 }
b0109805
PB
9426 store_reg(s, rn, addr);
9427 } else {
7d1b0095 9428 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9429 }
9430 if (loaded_base) {
b0109805 9431 store_reg(s, rn, loaded_var);
9ee6e8bb 9432 }
da3e53dd 9433 if (exc_return) {
9ee6e8bb 9434 /* Restore CPSR from SPSR. */
d9ba4830 9435 tmp = load_cpu_field(spsr);
235ea1f5 9436 gen_helper_cpsr_write_eret(cpu_env, tmp);
7d1b0095 9437 tcg_temp_free_i32(tmp);
577bf808 9438 s->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
9439 }
9440 }
9441 break;
9442 case 0xa:
9443 case 0xb:
9444 {
9445 int32_t offset;
9446
9447 /* branch (and link) */
9448 val = (int32_t)s->pc;
9449 if (insn & (1 << 24)) {
7d1b0095 9450 tmp = tcg_temp_new_i32();
5e3f878a
PB
9451 tcg_gen_movi_i32(tmp, val);
9452 store_reg(s, 14, tmp);
9ee6e8bb 9453 }
534df156
PM
9454 offset = sextract32(insn << 2, 0, 26);
9455 val += offset + 4;
9ee6e8bb
PB
9456 gen_jmp(s, val);
9457 }
9458 break;
9459 case 0xc:
9460 case 0xd:
9461 case 0xe:
6a57f3eb
WN
9462 if (((insn >> 8) & 0xe) == 10) {
9463 /* VFP. */
7dcc1f89 9464 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9465 goto illegal_op;
9466 }
7dcc1f89 9467 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9468 /* Coprocessor. */
9ee6e8bb 9469 goto illegal_op;
6a57f3eb 9470 }
9ee6e8bb
PB
9471 break;
9472 case 0xf:
9473 /* swi */
eaed129d 9474 gen_set_pc_im(s, s->pc);
d4a2dc67 9475 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9476 s->is_jmp = DISAS_SWI;
9477 break;
9478 default:
9479 illegal_op:
73710361
GB
9480 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
9481 default_exception_el(s));
9ee6e8bb
PB
9482 break;
9483 }
9484 }
9485}
9486
/* Return nonzero if OP is one of the Thumb-2 logical data-processing
 * opcodes (0..7: AND, BIC, ORR, ORN, EOR, ...).  Opcodes 8 and up are
 * the arithmetic group.
 */
static int
thumb2_logic_op(int op)
{
    if (op < 8) {
        return 1;
    }
    return 0;
}
9493
9494/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9495 then set condition code flags based on the result of the operation.
9496 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9497 to the high bit of T1.
9498 Returns zero if the opcode is valid. */
9499
9500static int
39d5492a
PM
9501gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9502 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9503{
9504 int logic_cc;
9505
9506 logic_cc = 0;
9507 switch (op) {
9508 case 0: /* and */
396e467c 9509 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9510 logic_cc = conds;
9511 break;
9512 case 1: /* bic */
f669df27 9513 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9514 logic_cc = conds;
9515 break;
9516 case 2: /* orr */
396e467c 9517 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9518 logic_cc = conds;
9519 break;
9520 case 3: /* orn */
29501f1b 9521 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9522 logic_cc = conds;
9523 break;
9524 case 4: /* eor */
396e467c 9525 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9526 logic_cc = conds;
9527 break;
9528 case 8: /* add */
9529 if (conds)
72485ec4 9530 gen_add_CC(t0, t0, t1);
9ee6e8bb 9531 else
396e467c 9532 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9533 break;
9534 case 10: /* adc */
9535 if (conds)
49b4c31e 9536 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9537 else
396e467c 9538 gen_adc(t0, t1);
9ee6e8bb
PB
9539 break;
9540 case 11: /* sbc */
2de68a49
RH
9541 if (conds) {
9542 gen_sbc_CC(t0, t0, t1);
9543 } else {
396e467c 9544 gen_sub_carry(t0, t0, t1);
2de68a49 9545 }
9ee6e8bb
PB
9546 break;
9547 case 13: /* sub */
9548 if (conds)
72485ec4 9549 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9550 else
396e467c 9551 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9552 break;
9553 case 14: /* rsb */
9554 if (conds)
72485ec4 9555 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9556 else
396e467c 9557 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9558 break;
9559 default: /* 5, 6, 7, 9, 12, 15. */
9560 return 1;
9561 }
9562 if (logic_cc) {
396e467c 9563 gen_logic_CC(t0);
9ee6e8bb 9564 if (shifter_out)
396e467c 9565 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9566 }
9567 return 0;
9568}
9569
9570/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9571 is not legal. */
0ecb72a5 9572static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9573{
b0109805 9574 uint32_t insn, imm, shift, offset;
9ee6e8bb 9575 uint32_t rd, rn, rm, rs;
39d5492a
PM
9576 TCGv_i32 tmp;
9577 TCGv_i32 tmp2;
9578 TCGv_i32 tmp3;
9579 TCGv_i32 addr;
a7812ae4 9580 TCGv_i64 tmp64;
9ee6e8bb
PB
9581 int op;
9582 int shiftop;
9583 int conds;
9584 int logic_cc;
9585
d614a513
PM
9586 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9587 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9588 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9589 16-bit instructions to get correct prefetch abort behavior. */
9590 insn = insn_hw1;
9591 if ((insn & (1 << 12)) == 0) {
be5e7a76 9592 ARCH(5);
9ee6e8bb
PB
9593 /* Second half of blx. */
9594 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9595 tmp = load_reg(s, 14);
9596 tcg_gen_addi_i32(tmp, tmp, offset);
9597 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9598
7d1b0095 9599 tmp2 = tcg_temp_new_i32();
b0109805 9600 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9601 store_reg(s, 14, tmp2);
9602 gen_bx(s, tmp);
9ee6e8bb
PB
9603 return 0;
9604 }
9605 if (insn & (1 << 11)) {
9606 /* Second half of bl. */
9607 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9608 tmp = load_reg(s, 14);
6a0d8a1d 9609 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9610
7d1b0095 9611 tmp2 = tcg_temp_new_i32();
b0109805 9612 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9613 store_reg(s, 14, tmp2);
9614 gen_bx(s, tmp);
9ee6e8bb
PB
9615 return 0;
9616 }
9617 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9618 /* Instruction spans a page boundary. Implement it as two
9619 16-bit instructions in case the second half causes an
9620 prefetch abort. */
9621 offset = ((int32_t)insn << 21) >> 9;
396e467c 9622 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9623 return 0;
9624 }
9625 /* Fall through to 32-bit decode. */
9626 }
9627
f9fd40eb 9628 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
9ee6e8bb
PB
9629 s->pc += 2;
9630 insn |= (uint32_t)insn_hw1 << 16;
9631
9632 if ((insn & 0xf800e800) != 0xf000e800) {
9633 ARCH(6T2);
9634 }
9635
9636 rn = (insn >> 16) & 0xf;
9637 rs = (insn >> 12) & 0xf;
9638 rd = (insn >> 8) & 0xf;
9639 rm = insn & 0xf;
9640 switch ((insn >> 25) & 0xf) {
9641 case 0: case 1: case 2: case 3:
9642 /* 16-bit instructions. Should never happen. */
9643 abort();
9644 case 4:
9645 if (insn & (1 << 22)) {
9646 /* Other load/store, table branch. */
9647 if (insn & 0x01200000) {
9648 /* Load/store doubleword. */
9649 if (rn == 15) {
7d1b0095 9650 addr = tcg_temp_new_i32();
b0109805 9651 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9652 } else {
b0109805 9653 addr = load_reg(s, rn);
9ee6e8bb
PB
9654 }
9655 offset = (insn & 0xff) * 4;
9656 if ((insn & (1 << 23)) == 0)
9657 offset = -offset;
9658 if (insn & (1 << 24)) {
b0109805 9659 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9660 offset = 0;
9661 }
9662 if (insn & (1 << 20)) {
9663 /* ldrd */
e2592fad 9664 tmp = tcg_temp_new_i32();
12dcc321 9665 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805
PB
9666 store_reg(s, rs, tmp);
9667 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9668 tmp = tcg_temp_new_i32();
12dcc321 9669 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9670 store_reg(s, rd, tmp);
9ee6e8bb
PB
9671 } else {
9672 /* strd */
b0109805 9673 tmp = load_reg(s, rs);
12dcc321 9674 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9675 tcg_temp_free_i32(tmp);
b0109805
PB
9676 tcg_gen_addi_i32(addr, addr, 4);
9677 tmp = load_reg(s, rd);
12dcc321 9678 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9679 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9680 }
9681 if (insn & (1 << 21)) {
9682 /* Base writeback. */
9683 if (rn == 15)
9684 goto illegal_op;
b0109805
PB
9685 tcg_gen_addi_i32(addr, addr, offset - 4);
9686 store_reg(s, rn, addr);
9687 } else {
7d1b0095 9688 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9689 }
9690 } else if ((insn & (1 << 23)) == 0) {
9691 /* Load/store exclusive word. */
39d5492a 9692 addr = tcg_temp_local_new_i32();
98a46317 9693 load_reg_var(s, addr, rn);
426f5abc 9694 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9695 if (insn & (1 << 20)) {
426f5abc 9696 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9697 } else {
426f5abc 9698 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9699 }
39d5492a 9700 tcg_temp_free_i32(addr);
2359bf80 9701 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9702 /* Table Branch. */
9703 if (rn == 15) {
7d1b0095 9704 addr = tcg_temp_new_i32();
b0109805 9705 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9706 } else {
b0109805 9707 addr = load_reg(s, rn);
9ee6e8bb 9708 }
b26eefb6 9709 tmp = load_reg(s, rm);
b0109805 9710 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9711 if (insn & (1 << 4)) {
9712 /* tbh */
b0109805 9713 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9714 tcg_temp_free_i32(tmp);
e2592fad 9715 tmp = tcg_temp_new_i32();
12dcc321 9716 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9717 } else { /* tbb */
7d1b0095 9718 tcg_temp_free_i32(tmp);
e2592fad 9719 tmp = tcg_temp_new_i32();
12dcc321 9720 gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9721 }
7d1b0095 9722 tcg_temp_free_i32(addr);
b0109805
PB
9723 tcg_gen_shli_i32(tmp, tmp, 1);
9724 tcg_gen_addi_i32(tmp, tmp, s->pc);
9725 store_reg(s, 15, tmp);
9ee6e8bb 9726 } else {
2359bf80 9727 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9728 op = (insn >> 4) & 0x3;
2359bf80
MR
9729 switch (op2) {
9730 case 0:
426f5abc 9731 goto illegal_op;
2359bf80
MR
9732 case 1:
9733 /* Load/store exclusive byte/halfword/doubleword */
9734 if (op == 2) {
9735 goto illegal_op;
9736 }
9737 ARCH(7);
9738 break;
9739 case 2:
9740 /* Load-acquire/store-release */
9741 if (op == 3) {
9742 goto illegal_op;
9743 }
9744 /* Fall through */
9745 case 3:
9746 /* Load-acquire/store-release exclusive */
9747 ARCH(8);
9748 break;
426f5abc 9749 }
39d5492a 9750 addr = tcg_temp_local_new_i32();
98a46317 9751 load_reg_var(s, addr, rn);
2359bf80
MR
9752 if (!(op2 & 1)) {
9753 if (insn & (1 << 20)) {
9754 tmp = tcg_temp_new_i32();
9755 switch (op) {
9756 case 0: /* ldab */
9bb6558a
PM
9757 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s),
9758 rs | ISSIsAcqRel);
2359bf80
MR
9759 break;
9760 case 1: /* ldah */
9bb6558a
PM
9761 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s),
9762 rs | ISSIsAcqRel);
2359bf80
MR
9763 break;
9764 case 2: /* lda */
9bb6558a
PM
9765 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
9766 rs | ISSIsAcqRel);
2359bf80
MR
9767 break;
9768 default:
9769 abort();
9770 }
9771 store_reg(s, rs, tmp);
9772 } else {
9773 tmp = load_reg(s, rs);
9774 switch (op) {
9775 case 0: /* stlb */
9bb6558a
PM
9776 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s),
9777 rs | ISSIsAcqRel);
2359bf80
MR
9778 break;
9779 case 1: /* stlh */
9bb6558a
PM
9780 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s),
9781 rs | ISSIsAcqRel);
2359bf80
MR
9782 break;
9783 case 2: /* stl */
9bb6558a
PM
9784 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s),
9785 rs | ISSIsAcqRel);
2359bf80
MR
9786 break;
9787 default:
9788 abort();
9789 }
9790 tcg_temp_free_i32(tmp);
9791 }
9792 } else if (insn & (1 << 20)) {
426f5abc 9793 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9794 } else {
426f5abc 9795 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9796 }
39d5492a 9797 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9798 }
9799 } else {
9800 /* Load/store multiple, RFE, SRS. */
9801 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9802 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9803 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9804 goto illegal_op;
00115976 9805 }
9ee6e8bb
PB
9806 if (insn & (1 << 20)) {
9807 /* rfe */
b0109805
PB
9808 addr = load_reg(s, rn);
9809 if ((insn & (1 << 24)) == 0)
9810 tcg_gen_addi_i32(addr, addr, -8);
9811 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9812 tmp = tcg_temp_new_i32();
12dcc321 9813 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 9814 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9815 tmp2 = tcg_temp_new_i32();
12dcc321 9816 gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9817 if (insn & (1 << 21)) {
9818 /* Base writeback. */
b0109805
PB
9819 if (insn & (1 << 24)) {
9820 tcg_gen_addi_i32(addr, addr, 4);
9821 } else {
9822 tcg_gen_addi_i32(addr, addr, -4);
9823 }
9824 store_reg(s, rn, addr);
9825 } else {
7d1b0095 9826 tcg_temp_free_i32(addr);
9ee6e8bb 9827 }
b0109805 9828 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9829 } else {
9830 /* srs */
81465888
PM
9831 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9832 insn & (1 << 21));
9ee6e8bb
PB
9833 }
9834 } else {
5856d44e 9835 int i, loaded_base = 0;
39d5492a 9836 TCGv_i32 loaded_var;
9ee6e8bb 9837 /* Load/store multiple. */
b0109805 9838 addr = load_reg(s, rn);
9ee6e8bb
PB
9839 offset = 0;
9840 for (i = 0; i < 16; i++) {
9841 if (insn & (1 << i))
9842 offset += 4;
9843 }
9844 if (insn & (1 << 24)) {
b0109805 9845 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9846 }
9847
39d5492a 9848 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9849 for (i = 0; i < 16; i++) {
9850 if ((insn & (1 << i)) == 0)
9851 continue;
9852 if (insn & (1 << 20)) {
9853 /* Load. */
e2592fad 9854 tmp = tcg_temp_new_i32();
12dcc321 9855 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
9ee6e8bb 9856 if (i == 15) {
b0109805 9857 gen_bx(s, tmp);
5856d44e
YO
9858 } else if (i == rn) {
9859 loaded_var = tmp;
9860 loaded_base = 1;
9ee6e8bb 9861 } else {
b0109805 9862 store_reg(s, i, tmp);
9ee6e8bb
PB
9863 }
9864 } else {
9865 /* Store. */
b0109805 9866 tmp = load_reg(s, i);
12dcc321 9867 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
e2592fad 9868 tcg_temp_free_i32(tmp);
9ee6e8bb 9869 }
b0109805 9870 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9871 }
5856d44e
YO
9872 if (loaded_base) {
9873 store_reg(s, rn, loaded_var);
9874 }
9ee6e8bb
PB
9875 if (insn & (1 << 21)) {
9876 /* Base register writeback. */
9877 if (insn & (1 << 24)) {
b0109805 9878 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9879 }
9880 /* Fault if writeback register is in register list. */
9881 if (insn & (1 << rn))
9882 goto illegal_op;
b0109805
PB
9883 store_reg(s, rn, addr);
9884 } else {
7d1b0095 9885 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9886 }
9887 }
9888 }
9889 break;
2af9ab77
JB
9890 case 5:
9891
9ee6e8bb 9892 op = (insn >> 21) & 0xf;
2af9ab77 9893 if (op == 6) {
62b44f05
AR
9894 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9895 goto illegal_op;
9896 }
2af9ab77
JB
9897 /* Halfword pack. */
9898 tmp = load_reg(s, rn);
9899 tmp2 = load_reg(s, rm);
9900 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9901 if (insn & (1 << 5)) {
9902 /* pkhtb */
9903 if (shift == 0)
9904 shift = 31;
9905 tcg_gen_sari_i32(tmp2, tmp2, shift);
9906 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9907 tcg_gen_ext16u_i32(tmp2, tmp2);
9908 } else {
9909 /* pkhbt */
9910 if (shift)
9911 tcg_gen_shli_i32(tmp2, tmp2, shift);
9912 tcg_gen_ext16u_i32(tmp, tmp);
9913 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9914 }
9915 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9916 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9917 store_reg(s, rd, tmp);
9918 } else {
2af9ab77
JB
9919 /* Data processing register constant shift. */
9920 if (rn == 15) {
7d1b0095 9921 tmp = tcg_temp_new_i32();
2af9ab77
JB
9922 tcg_gen_movi_i32(tmp, 0);
9923 } else {
9924 tmp = load_reg(s, rn);
9925 }
9926 tmp2 = load_reg(s, rm);
9927
9928 shiftop = (insn >> 4) & 3;
9929 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9930 conds = (insn & (1 << 20)) != 0;
9931 logic_cc = (conds && thumb2_logic_op(op));
9932 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9933 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9934 goto illegal_op;
7d1b0095 9935 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9936 if (rd != 15) {
9937 store_reg(s, rd, tmp);
9938 } else {
7d1b0095 9939 tcg_temp_free_i32(tmp);
2af9ab77 9940 }
3174f8e9 9941 }
9ee6e8bb
PB
9942 break;
9943 case 13: /* Misc data processing. */
9944 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9945 if (op < 4 && (insn & 0xf000) != 0xf000)
9946 goto illegal_op;
9947 switch (op) {
9948 case 0: /* Register controlled shift. */
8984bd2e
PB
9949 tmp = load_reg(s, rn);
9950 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9951 if ((insn & 0x70) != 0)
9952 goto illegal_op;
9953 op = (insn >> 21) & 3;
8984bd2e
PB
9954 logic_cc = (insn & (1 << 20)) != 0;
9955 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9956 if (logic_cc)
9957 gen_logic_CC(tmp);
7dcc1f89 9958 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9959 break;
9960 case 1: /* Sign/zero extend. */
62b44f05
AR
9961 op = (insn >> 20) & 7;
9962 switch (op) {
9963 case 0: /* SXTAH, SXTH */
9964 case 1: /* UXTAH, UXTH */
9965 case 4: /* SXTAB, SXTB */
9966 case 5: /* UXTAB, UXTB */
9967 break;
9968 case 2: /* SXTAB16, SXTB16 */
9969 case 3: /* UXTAB16, UXTB16 */
9970 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9971 goto illegal_op;
9972 }
9973 break;
9974 default:
9975 goto illegal_op;
9976 }
9977 if (rn != 15) {
9978 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
9979 goto illegal_op;
9980 }
9981 }
5e3f878a 9982 tmp = load_reg(s, rm);
9ee6e8bb 9983 shift = (insn >> 4) & 3;
1301f322 9984 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9985 rotate, a shift is sufficient. */
9986 if (shift != 0)
f669df27 9987 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9988 op = (insn >> 20) & 7;
9989 switch (op) {
5e3f878a
PB
9990 case 0: gen_sxth(tmp); break;
9991 case 1: gen_uxth(tmp); break;
9992 case 2: gen_sxtb16(tmp); break;
9993 case 3: gen_uxtb16(tmp); break;
9994 case 4: gen_sxtb(tmp); break;
9995 case 5: gen_uxtb(tmp); break;
62b44f05
AR
9996 default:
9997 g_assert_not_reached();
9ee6e8bb
PB
9998 }
9999 if (rn != 15) {
5e3f878a 10000 tmp2 = load_reg(s, rn);
9ee6e8bb 10001 if ((op >> 1) == 1) {
5e3f878a 10002 gen_add16(tmp, tmp2);
9ee6e8bb 10003 } else {
5e3f878a 10004 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10005 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10006 }
10007 }
5e3f878a 10008 store_reg(s, rd, tmp);
9ee6e8bb
PB
10009 break;
10010 case 2: /* SIMD add/subtract. */
62b44f05
AR
10011 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10012 goto illegal_op;
10013 }
9ee6e8bb
PB
10014 op = (insn >> 20) & 7;
10015 shift = (insn >> 4) & 7;
10016 if ((op & 3) == 3 || (shift & 3) == 3)
10017 goto illegal_op;
6ddbc6e4
PB
10018 tmp = load_reg(s, rn);
10019 tmp2 = load_reg(s, rm);
10020 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 10021 tcg_temp_free_i32(tmp2);
6ddbc6e4 10022 store_reg(s, rd, tmp);
9ee6e8bb
PB
10023 break;
10024 case 3: /* Other data processing. */
10025 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
10026 if (op < 4) {
10027 /* Saturating add/subtract. */
62b44f05
AR
10028 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10029 goto illegal_op;
10030 }
d9ba4830
PB
10031 tmp = load_reg(s, rn);
10032 tmp2 = load_reg(s, rm);
9ee6e8bb 10033 if (op & 1)
9ef39277 10034 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 10035 if (op & 2)
9ef39277 10036 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 10037 else
9ef39277 10038 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 10039 tcg_temp_free_i32(tmp2);
9ee6e8bb 10040 } else {
62b44f05
AR
10041 switch (op) {
10042 case 0x0a: /* rbit */
10043 case 0x08: /* rev */
10044 case 0x09: /* rev16 */
10045 case 0x0b: /* revsh */
10046 case 0x18: /* clz */
10047 break;
10048 case 0x10: /* sel */
10049 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10050 goto illegal_op;
10051 }
10052 break;
10053 case 0x20: /* crc32/crc32c */
10054 case 0x21:
10055 case 0x22:
10056 case 0x28:
10057 case 0x29:
10058 case 0x2a:
10059 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
10060 goto illegal_op;
10061 }
10062 break;
10063 default:
10064 goto illegal_op;
10065 }
d9ba4830 10066 tmp = load_reg(s, rn);
9ee6e8bb
PB
10067 switch (op) {
10068 case 0x0a: /* rbit */
d9ba4830 10069 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
10070 break;
10071 case 0x08: /* rev */
66896cb8 10072 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
10073 break;
10074 case 0x09: /* rev16 */
d9ba4830 10075 gen_rev16(tmp);
9ee6e8bb
PB
10076 break;
10077 case 0x0b: /* revsh */
d9ba4830 10078 gen_revsh(tmp);
9ee6e8bb
PB
10079 break;
10080 case 0x10: /* sel */
d9ba4830 10081 tmp2 = load_reg(s, rm);
7d1b0095 10082 tmp3 = tcg_temp_new_i32();
0ecb72a5 10083 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 10084 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
10085 tcg_temp_free_i32(tmp3);
10086 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10087 break;
10088 case 0x18: /* clz */
7539a012 10089 tcg_gen_clzi_i32(tmp, tmp, 32);
9ee6e8bb 10090 break;
eb0ecd5a
WN
10091 case 0x20:
10092 case 0x21:
10093 case 0x22:
10094 case 0x28:
10095 case 0x29:
10096 case 0x2a:
10097 {
10098 /* crc32/crc32c */
10099 uint32_t sz = op & 0x3;
10100 uint32_t c = op & 0x8;
10101
eb0ecd5a 10102 tmp2 = load_reg(s, rm);
aa633469
PM
10103 if (sz == 0) {
10104 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
10105 } else if (sz == 1) {
10106 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
10107 }
eb0ecd5a
WN
10108 tmp3 = tcg_const_i32(1 << sz);
10109 if (c) {
10110 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
10111 } else {
10112 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
10113 }
10114 tcg_temp_free_i32(tmp2);
10115 tcg_temp_free_i32(tmp3);
10116 break;
10117 }
9ee6e8bb 10118 default:
62b44f05 10119 g_assert_not_reached();
9ee6e8bb
PB
10120 }
10121 }
d9ba4830 10122 store_reg(s, rd, tmp);
9ee6e8bb
PB
10123 break;
10124 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
62b44f05
AR
10125 switch ((insn >> 20) & 7) {
10126 case 0: /* 32 x 32 -> 32 */
10127 case 7: /* Unsigned sum of absolute differences. */
10128 break;
10129 case 1: /* 16 x 16 -> 32 */
10130 case 2: /* Dual multiply add. */
10131 case 3: /* 32 * 16 -> 32msb */
10132 case 4: /* Dual multiply subtract. */
10133 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10134 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10135 goto illegal_op;
10136 }
10137 break;
10138 }
9ee6e8bb 10139 op = (insn >> 4) & 0xf;
d9ba4830
PB
10140 tmp = load_reg(s, rn);
10141 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10142 switch ((insn >> 20) & 7) {
10143 case 0: /* 32 x 32 -> 32 */
d9ba4830 10144 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 10145 tcg_temp_free_i32(tmp2);
9ee6e8bb 10146 if (rs != 15) {
d9ba4830 10147 tmp2 = load_reg(s, rs);
9ee6e8bb 10148 if (op)
d9ba4830 10149 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 10150 else
d9ba4830 10151 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10152 tcg_temp_free_i32(tmp2);
9ee6e8bb 10153 }
9ee6e8bb
PB
10154 break;
10155 case 1: /* 16 x 16 -> 32 */
d9ba4830 10156 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10157 tcg_temp_free_i32(tmp2);
9ee6e8bb 10158 if (rs != 15) {
d9ba4830 10159 tmp2 = load_reg(s, rs);
9ef39277 10160 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10161 tcg_temp_free_i32(tmp2);
9ee6e8bb 10162 }
9ee6e8bb
PB
10163 break;
10164 case 2: /* Dual multiply add. */
10165 case 4: /* Dual multiply subtract. */
10166 if (op)
d9ba4830
PB
10167 gen_swap_half(tmp2);
10168 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10169 if (insn & (1 << 22)) {
e1d177b9 10170 /* This subtraction cannot overflow. */
d9ba4830 10171 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10172 } else {
e1d177b9
PM
10173 /* This addition cannot overflow 32 bits;
10174 * however it may overflow considered as a signed
10175 * operation, in which case we must set the Q flag.
10176 */
9ef39277 10177 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10178 }
7d1b0095 10179 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10180 if (rs != 15)
10181 {
d9ba4830 10182 tmp2 = load_reg(s, rs);
9ef39277 10183 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10184 tcg_temp_free_i32(tmp2);
9ee6e8bb 10185 }
9ee6e8bb
PB
10186 break;
10187 case 3: /* 32 * 16 -> 32msb */
10188 if (op)
d9ba4830 10189 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 10190 else
d9ba4830 10191 gen_sxth(tmp2);
a7812ae4
PB
10192 tmp64 = gen_muls_i64_i32(tmp, tmp2);
10193 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 10194 tmp = tcg_temp_new_i32();
ecc7b3aa 10195 tcg_gen_extrl_i64_i32(tmp, tmp64);
b75263d6 10196 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10197 if (rs != 15)
10198 {
d9ba4830 10199 tmp2 = load_reg(s, rs);
9ef39277 10200 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 10201 tcg_temp_free_i32(tmp2);
9ee6e8bb 10202 }
9ee6e8bb 10203 break;
838fa72d
AJ
10204 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
10205 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10206 if (rs != 15) {
838fa72d
AJ
10207 tmp = load_reg(s, rs);
10208 if (insn & (1 << 20)) {
10209 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 10210 } else {
838fa72d 10211 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 10212 }
2c0262af 10213 }
838fa72d
AJ
10214 if (insn & (1 << 4)) {
10215 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
10216 }
10217 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 10218 tmp = tcg_temp_new_i32();
ecc7b3aa 10219 tcg_gen_extrl_i64_i32(tmp, tmp64);
838fa72d 10220 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
10221 break;
10222 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 10223 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 10224 tcg_temp_free_i32(tmp2);
9ee6e8bb 10225 if (rs != 15) {
d9ba4830
PB
10226 tmp2 = load_reg(s, rs);
10227 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10228 tcg_temp_free_i32(tmp2);
5fd46862 10229 }
9ee6e8bb 10230 break;
2c0262af 10231 }
d9ba4830 10232 store_reg(s, rd, tmp);
2c0262af 10233 break;
9ee6e8bb
PB
10234 case 6: case 7: /* 64-bit multiply, Divide. */
10235 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
10236 tmp = load_reg(s, rn);
10237 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
10238 if ((op & 0x50) == 0x10) {
10239 /* sdiv, udiv */
d614a513 10240 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 10241 goto illegal_op;
47789990 10242 }
9ee6e8bb 10243 if (op & 0x20)
5e3f878a 10244 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 10245 else
5e3f878a 10246 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 10247 tcg_temp_free_i32(tmp2);
5e3f878a 10248 store_reg(s, rd, tmp);
9ee6e8bb
PB
10249 } else if ((op & 0xe) == 0xc) {
10250 /* Dual multiply accumulate long. */
62b44f05
AR
10251 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10252 tcg_temp_free_i32(tmp);
10253 tcg_temp_free_i32(tmp2);
10254 goto illegal_op;
10255 }
9ee6e8bb 10256 if (op & 1)
5e3f878a
PB
10257 gen_swap_half(tmp2);
10258 gen_smul_dual(tmp, tmp2);
9ee6e8bb 10259 if (op & 0x10) {
5e3f878a 10260 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 10261 } else {
5e3f878a 10262 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 10263 }
7d1b0095 10264 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10265 /* BUGFIX */
10266 tmp64 = tcg_temp_new_i64();
10267 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10268 tcg_temp_free_i32(tmp);
a7812ae4
PB
10269 gen_addq(s, tmp64, rs, rd);
10270 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10271 tcg_temp_free_i64(tmp64);
2c0262af 10272 } else {
9ee6e8bb
PB
10273 if (op & 0x20) {
10274 /* Unsigned 64-bit multiply */
a7812ae4 10275 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 10276 } else {
9ee6e8bb
PB
10277 if (op & 8) {
10278 /* smlalxy */
62b44f05
AR
10279 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10280 tcg_temp_free_i32(tmp2);
10281 tcg_temp_free_i32(tmp);
10282 goto illegal_op;
10283 }
5e3f878a 10284 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 10285 tcg_temp_free_i32(tmp2);
a7812ae4
PB
10286 tmp64 = tcg_temp_new_i64();
10287 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 10288 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10289 } else {
10290 /* Signed 64-bit multiply */
a7812ae4 10291 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 10292 }
b5ff1b31 10293 }
9ee6e8bb
PB
10294 if (op & 4) {
10295 /* umaal */
62b44f05
AR
10296 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10297 tcg_temp_free_i64(tmp64);
10298 goto illegal_op;
10299 }
a7812ae4
PB
10300 gen_addq_lo(s, tmp64, rs);
10301 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
10302 } else if (op & 0x40) {
10303 /* 64-bit accumulate. */
a7812ae4 10304 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 10305 }
a7812ae4 10306 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 10307 tcg_temp_free_i64(tmp64);
5fd46862 10308 }
2c0262af 10309 break;
9ee6e8bb
PB
10310 }
10311 break;
10312 case 6: case 7: case 14: case 15:
10313 /* Coprocessor. */
7517748e
PM
10314 if (arm_dc_feature(s, ARM_FEATURE_M)) {
10315 /* We don't currently implement M profile FP support,
10316 * so this entire space should give a NOCP fault.
10317 */
10318 gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
10319 default_exception_el(s));
10320 break;
10321 }
9ee6e8bb
PB
10322 if (((insn >> 24) & 3) == 3) {
10323 /* Translate into the equivalent ARM encoding. */
f06053e3 10324 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 10325 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 10326 goto illegal_op;
7dcc1f89 10327 }
6a57f3eb 10328 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 10329 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
10330 goto illegal_op;
10331 }
9ee6e8bb
PB
10332 } else {
10333 if (insn & (1 << 28))
10334 goto illegal_op;
7dcc1f89 10335 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 10336 goto illegal_op;
7dcc1f89 10337 }
9ee6e8bb
PB
10338 }
10339 break;
10340 case 8: case 9: case 10: case 11:
10341 if (insn & (1 << 15)) {
10342 /* Branches, misc control. */
10343 if (insn & 0x5000) {
10344 /* Unconditional branch. */
10345 /* signextend(hw1[10:0]) -> offset[:12]. */
10346 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
10347 /* hw1[10:0] -> offset[11:1]. */
10348 offset |= (insn & 0x7ff) << 1;
10349 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
10350 offset[24:22] already have the same value because of the
10351 sign extension above. */
10352 offset ^= ((~insn) & (1 << 13)) << 10;
10353 offset ^= ((~insn) & (1 << 11)) << 11;
10354
9ee6e8bb
PB
10355 if (insn & (1 << 14)) {
10356 /* Branch and link. */
3174f8e9 10357 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 10358 }
3b46e624 10359
b0109805 10360 offset += s->pc;
9ee6e8bb
PB
10361 if (insn & (1 << 12)) {
10362 /* b/bl */
b0109805 10363 gen_jmp(s, offset);
9ee6e8bb
PB
10364 } else {
10365 /* blx */
b0109805 10366 offset &= ~(uint32_t)2;
be5e7a76 10367 /* thumb2 bx, no need to check */
b0109805 10368 gen_bx_im(s, offset);
2c0262af 10369 }
9ee6e8bb
PB
10370 } else if (((insn >> 23) & 7) == 7) {
10371 /* Misc control */
10372 if (insn & (1 << 13))
10373 goto illegal_op;
10374
10375 if (insn & (1 << 26)) {
37e6456e
PM
10376 if (!(insn & (1 << 20))) {
10377 /* Hypervisor call (v7) */
10378 int imm16 = extract32(insn, 16, 4) << 12
10379 | extract32(insn, 0, 12);
10380 ARCH(7);
10381 if (IS_USER(s)) {
10382 goto illegal_op;
10383 }
10384 gen_hvc(s, imm16);
10385 } else {
10386 /* Secure monitor call (v6+) */
10387 ARCH(6K);
10388 if (IS_USER(s)) {
10389 goto illegal_op;
10390 }
10391 gen_smc(s);
10392 }
2c0262af 10393 } else {
9ee6e8bb
PB
10394 op = (insn >> 20) & 7;
10395 switch (op) {
10396 case 0: /* msr cpsr. */
b53d8923 10397 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10398 tmp = load_reg(s, rn);
10399 addr = tcg_const_i32(insn & 0xff);
10400 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 10401 tcg_temp_free_i32(addr);
7d1b0095 10402 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10403 gen_lookup_tb(s);
10404 break;
10405 }
10406 /* fall through */
10407 case 1: /* msr spsr. */
b53d8923 10408 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10409 goto illegal_op;
b53d8923 10410 }
8bfd0550
PM
10411
10412 if (extract32(insn, 5, 1)) {
10413 /* MSR (banked) */
10414 int sysm = extract32(insn, 8, 4) |
10415 (extract32(insn, 4, 1) << 4);
10416 int r = op & 1;
10417
10418 gen_msr_banked(s, r, sysm, rm);
10419 break;
10420 }
10421
10422 /* MSR (for PSRs) */
2fbac54b
FN
10423 tmp = load_reg(s, rn);
10424 if (gen_set_psr(s,
7dcc1f89 10425 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 10426 op == 1, tmp))
9ee6e8bb
PB
10427 goto illegal_op;
10428 break;
10429 case 2: /* cps, nop-hint. */
10430 if (((insn >> 8) & 7) == 0) {
10431 gen_nop_hint(s, insn & 0xff);
10432 }
10433 /* Implemented as NOP in user mode. */
10434 if (IS_USER(s))
10435 break;
10436 offset = 0;
10437 imm = 0;
10438 if (insn & (1 << 10)) {
10439 if (insn & (1 << 7))
10440 offset |= CPSR_A;
10441 if (insn & (1 << 6))
10442 offset |= CPSR_I;
10443 if (insn & (1 << 5))
10444 offset |= CPSR_F;
10445 if (insn & (1 << 9))
10446 imm = CPSR_A | CPSR_I | CPSR_F;
10447 }
10448 if (insn & (1 << 8)) {
10449 offset |= 0x1f;
10450 imm |= (insn & 0x1f);
10451 }
10452 if (offset) {
2fbac54b 10453 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
10454 }
10455 break;
10456 case 3: /* Special control operations. */
426f5abc 10457 ARCH(7);
9ee6e8bb
PB
10458 op = (insn >> 4) & 0xf;
10459 switch (op) {
10460 case 2: /* clrex */
426f5abc 10461 gen_clrex(s);
9ee6e8bb
PB
10462 break;
10463 case 4: /* dsb */
10464 case 5: /* dmb */
61e4c432 10465 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
9ee6e8bb 10466 break;
6df99dec
SS
10467 case 6: /* isb */
10468 /* We need to break the TB after this insn
10469 * to execute self-modifying code correctly
10470 * and also to take any pending interrupts
10471 * immediately.
10472 */
10473 gen_lookup_tb(s);
10474 break;
9ee6e8bb
PB
10475 default:
10476 goto illegal_op;
10477 }
10478 break;
10479 case 4: /* bxj */
10480 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
10481 tmp = load_reg(s, rn);
10482 gen_bx(s, tmp);
9ee6e8bb
PB
10483 break;
10484 case 5: /* Exception return. */
b8b45b68
RV
10485 if (IS_USER(s)) {
10486 goto illegal_op;
10487 }
10488 if (rn != 14 || rd != 15) {
10489 goto illegal_op;
10490 }
10491 tmp = load_reg(s, rn);
10492 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
10493 gen_exception_return(s, tmp);
10494 break;
8bfd0550
PM
10495 case 6: /* MRS */
10496 if (extract32(insn, 5, 1)) {
10497 /* MRS (banked) */
10498 int sysm = extract32(insn, 16, 4) |
10499 (extract32(insn, 4, 1) << 4);
10500
10501 gen_mrs_banked(s, 0, sysm, rd);
10502 break;
10503 }
10504
10505 /* mrs cpsr */
7d1b0095 10506 tmp = tcg_temp_new_i32();
b53d8923 10507 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
10508 addr = tcg_const_i32(insn & 0xff);
10509 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 10510 tcg_temp_free_i32(addr);
9ee6e8bb 10511 } else {
9ef39277 10512 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 10513 }
8984bd2e 10514 store_reg(s, rd, tmp);
9ee6e8bb 10515 break;
8bfd0550
PM
10516 case 7: /* MRS */
10517 if (extract32(insn, 5, 1)) {
10518 /* MRS (banked) */
10519 int sysm = extract32(insn, 16, 4) |
10520 (extract32(insn, 4, 1) << 4);
10521
10522 gen_mrs_banked(s, 1, sysm, rd);
10523 break;
10524 }
10525
10526 /* mrs spsr. */
9ee6e8bb 10527 /* Not accessible in user mode. */
b53d8923 10528 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 10529 goto illegal_op;
b53d8923 10530 }
d9ba4830
PB
10531 tmp = load_cpu_field(spsr);
10532 store_reg(s, rd, tmp);
9ee6e8bb 10533 break;
2c0262af
FB
10534 }
10535 }
9ee6e8bb
PB
10536 } else {
10537 /* Conditional branch. */
10538 op = (insn >> 22) & 0xf;
10539 /* Generate a conditional jump to next instruction. */
10540 s->condlabel = gen_new_label();
39fb730a 10541 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
10542 s->condjmp = 1;
10543
10544 /* offset[11:1] = insn[10:0] */
10545 offset = (insn & 0x7ff) << 1;
10546 /* offset[17:12] = insn[21:16]. */
10547 offset |= (insn & 0x003f0000) >> 4;
10548 /* offset[31:20] = insn[26]. */
10549 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
10550 /* offset[18] = insn[13]. */
10551 offset |= (insn & (1 << 13)) << 5;
10552 /* offset[19] = insn[11]. */
10553 offset |= (insn & (1 << 11)) << 8;
10554
10555 /* jump to the offset */
b0109805 10556 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
10557 }
10558 } else {
10559 /* Data processing immediate. */
10560 if (insn & (1 << 25)) {
10561 if (insn & (1 << 24)) {
10562 if (insn & (1 << 20))
10563 goto illegal_op;
10564 /* Bitfield/Saturate. */
10565 op = (insn >> 21) & 7;
10566 imm = insn & 0x1f;
10567 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 10568 if (rn == 15) {
7d1b0095 10569 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
10570 tcg_gen_movi_i32(tmp, 0);
10571 } else {
10572 tmp = load_reg(s, rn);
10573 }
9ee6e8bb
PB
10574 switch (op) {
10575 case 2: /* Signed bitfield extract. */
10576 imm++;
10577 if (shift + imm > 32)
10578 goto illegal_op;
59a71b4c
RH
10579 if (imm < 32) {
10580 tcg_gen_sextract_i32(tmp, tmp, shift, imm);
10581 }
9ee6e8bb
PB
10582 break;
10583 case 6: /* Unsigned bitfield extract. */
10584 imm++;
10585 if (shift + imm > 32)
10586 goto illegal_op;
59a71b4c
RH
10587 if (imm < 32) {
10588 tcg_gen_extract_i32(tmp, tmp, shift, imm);
10589 }
9ee6e8bb
PB
10590 break;
10591 case 3: /* Bitfield insert/clear. */
10592 if (imm < shift)
10593 goto illegal_op;
10594 imm = imm + 1 - shift;
10595 if (imm != 32) {
6ddbc6e4 10596 tmp2 = load_reg(s, rd);
d593c48e 10597 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10598 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10599 }
10600 break;
10601 case 7:
10602 goto illegal_op;
10603 default: /* Saturate. */
9ee6e8bb
PB
10604 if (shift) {
10605 if (op & 1)
6ddbc6e4 10606 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10607 else
6ddbc6e4 10608 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10609 }
6ddbc6e4 10610 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10611 if (op & 4) {
10612 /* Unsigned. */
62b44f05
AR
10613 if ((op & 1) && shift == 0) {
10614 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10615 tcg_temp_free_i32(tmp);
10616 tcg_temp_free_i32(tmp2);
10617 goto illegal_op;
10618 }
9ef39277 10619 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10620 } else {
9ef39277 10621 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
62b44f05 10622 }
2c0262af 10623 } else {
9ee6e8bb 10624 /* Signed. */
62b44f05
AR
10625 if ((op & 1) && shift == 0) {
10626 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
10627 tcg_temp_free_i32(tmp);
10628 tcg_temp_free_i32(tmp2);
10629 goto illegal_op;
10630 }
9ef39277 10631 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
62b44f05 10632 } else {
9ef39277 10633 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
62b44f05 10634 }
2c0262af 10635 }
b75263d6 10636 tcg_temp_free_i32(tmp2);
9ee6e8bb 10637 break;
2c0262af 10638 }
6ddbc6e4 10639 store_reg(s, rd, tmp);
9ee6e8bb
PB
10640 } else {
10641 imm = ((insn & 0x04000000) >> 15)
10642 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10643 if (insn & (1 << 22)) {
10644 /* 16-bit immediate. */
10645 imm |= (insn >> 4) & 0xf000;
10646 if (insn & (1 << 23)) {
10647 /* movt */
5e3f878a 10648 tmp = load_reg(s, rd);
86831435 10649 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10650 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10651 } else {
9ee6e8bb 10652 /* movw */
7d1b0095 10653 tmp = tcg_temp_new_i32();
5e3f878a 10654 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10655 }
10656 } else {
9ee6e8bb
PB
10657 /* Add/sub 12-bit immediate. */
10658 if (rn == 15) {
b0109805 10659 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10660 if (insn & (1 << 23))
b0109805 10661 offset -= imm;
9ee6e8bb 10662 else
b0109805 10663 offset += imm;
7d1b0095 10664 tmp = tcg_temp_new_i32();
5e3f878a 10665 tcg_gen_movi_i32(tmp, offset);
2c0262af 10666 } else {
5e3f878a 10667 tmp = load_reg(s, rn);
9ee6e8bb 10668 if (insn & (1 << 23))
5e3f878a 10669 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10670 else
5e3f878a 10671 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10672 }
9ee6e8bb 10673 }
5e3f878a 10674 store_reg(s, rd, tmp);
191abaa2 10675 }
9ee6e8bb
PB
10676 } else {
10677 int shifter_out = 0;
10678 /* modified 12-bit immediate. */
10679 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10680 imm = (insn & 0xff);
10681 switch (shift) {
10682 case 0: /* XY */
10683 /* Nothing to do. */
10684 break;
10685 case 1: /* 00XY00XY */
10686 imm |= imm << 16;
10687 break;
10688 case 2: /* XY00XY00 */
10689 imm |= imm << 16;
10690 imm <<= 8;
10691 break;
10692 case 3: /* XYXYXYXY */
10693 imm |= imm << 16;
10694 imm |= imm << 8;
10695 break;
10696 default: /* Rotated constant. */
10697 shift = (shift << 1) | (imm >> 7);
10698 imm |= 0x80;
10699 imm = imm << (32 - shift);
10700 shifter_out = 1;
10701 break;
b5ff1b31 10702 }
7d1b0095 10703 tmp2 = tcg_temp_new_i32();
3174f8e9 10704 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10705 rn = (insn >> 16) & 0xf;
3174f8e9 10706 if (rn == 15) {
7d1b0095 10707 tmp = tcg_temp_new_i32();
3174f8e9
FN
10708 tcg_gen_movi_i32(tmp, 0);
10709 } else {
10710 tmp = load_reg(s, rn);
10711 }
9ee6e8bb
PB
10712 op = (insn >> 21) & 0xf;
10713 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10714 shifter_out, tmp, tmp2))
9ee6e8bb 10715 goto illegal_op;
7d1b0095 10716 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10717 rd = (insn >> 8) & 0xf;
10718 if (rd != 15) {
3174f8e9
FN
10719 store_reg(s, rd, tmp);
10720 } else {
7d1b0095 10721 tcg_temp_free_i32(tmp);
2c0262af 10722 }
2c0262af 10723 }
9ee6e8bb
PB
10724 }
10725 break;
10726 case 12: /* Load/store single data item. */
10727 {
10728 int postinc = 0;
10729 int writeback = 0;
a99caa48 10730 int memidx;
9bb6558a
PM
10731 ISSInfo issinfo;
10732
9ee6e8bb 10733 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10734 if (disas_neon_ls_insn(s, insn)) {
c1713132 10735 goto illegal_op;
7dcc1f89 10736 }
9ee6e8bb
PB
10737 break;
10738 }
a2fdc890
PM
10739 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10740 if (rs == 15) {
10741 if (!(insn & (1 << 20))) {
10742 goto illegal_op;
10743 }
10744 if (op != 2) {
10745 /* Byte or halfword load space with dest == r15 : memory hints.
10746 * Catch them early so we don't emit pointless addressing code.
10747 * This space is a mix of:
10748 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10749 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10750 * cores)
10751 * unallocated hints, which must be treated as NOPs
10752 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10753 * which is easiest for the decoding logic
10754 * Some space which must UNDEF
10755 */
10756 int op1 = (insn >> 23) & 3;
10757 int op2 = (insn >> 6) & 0x3f;
10758 if (op & 2) {
10759 goto illegal_op;
10760 }
10761 if (rn == 15) {
02afbf64
PM
10762 /* UNPREDICTABLE, unallocated hint or
10763 * PLD/PLDW/PLI (literal)
10764 */
a2fdc890
PM
10765 return 0;
10766 }
10767 if (op1 & 1) {
02afbf64 10768 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10769 }
10770 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10771 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10772 }
10773 /* UNDEF space, or an UNPREDICTABLE */
10774 return 1;
10775 }
10776 }
a99caa48 10777 memidx = get_mem_index(s);
9ee6e8bb 10778 if (rn == 15) {
7d1b0095 10779 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10780 /* PC relative. */
10781 /* s->pc has already been incremented by 4. */
10782 imm = s->pc & 0xfffffffc;
10783 if (insn & (1 << 23))
10784 imm += insn & 0xfff;
10785 else
10786 imm -= insn & 0xfff;
b0109805 10787 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10788 } else {
b0109805 10789 addr = load_reg(s, rn);
9ee6e8bb
PB
10790 if (insn & (1 << 23)) {
10791 /* Positive offset. */
10792 imm = insn & 0xfff;
b0109805 10793 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10794 } else {
9ee6e8bb 10795 imm = insn & 0xff;
2a0308c5
PM
10796 switch ((insn >> 8) & 0xf) {
10797 case 0x0: /* Shifted Register. */
9ee6e8bb 10798 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10799 if (shift > 3) {
10800 tcg_temp_free_i32(addr);
18c9b560 10801 goto illegal_op;
2a0308c5 10802 }
b26eefb6 10803 tmp = load_reg(s, rm);
9ee6e8bb 10804 if (shift)
b26eefb6 10805 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10806 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10807 tcg_temp_free_i32(tmp);
9ee6e8bb 10808 break;
2a0308c5 10809 case 0xc: /* Negative offset. */
b0109805 10810 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10811 break;
2a0308c5 10812 case 0xe: /* User privilege. */
b0109805 10813 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10814 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10815 break;
2a0308c5 10816 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10817 imm = -imm;
10818 /* Fall through. */
2a0308c5 10819 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10820 postinc = 1;
10821 writeback = 1;
10822 break;
2a0308c5 10823 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10824 imm = -imm;
10825 /* Fall through. */
2a0308c5 10826 case 0xf: /* Pre-increment. */
b0109805 10827 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10828 writeback = 1;
10829 break;
10830 default:
2a0308c5 10831 tcg_temp_free_i32(addr);
b7bcbe95 10832 goto illegal_op;
9ee6e8bb
PB
10833 }
10834 }
10835 }
9bb6558a
PM
10836
10837 issinfo = writeback ? ISSInvalid : rs;
10838
9ee6e8bb
PB
10839 if (insn & (1 << 20)) {
10840 /* Load. */
5a839c0d 10841 tmp = tcg_temp_new_i32();
a2fdc890 10842 switch (op) {
5a839c0d 10843 case 0:
9bb6558a 10844 gen_aa32_ld8u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10845 break;
10846 case 4:
9bb6558a 10847 gen_aa32_ld8s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10848 break;
10849 case 1:
9bb6558a 10850 gen_aa32_ld16u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10851 break;
10852 case 5:
9bb6558a 10853 gen_aa32_ld16s_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10854 break;
10855 case 2:
9bb6558a 10856 gen_aa32_ld32u_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10857 break;
2a0308c5 10858 default:
5a839c0d 10859 tcg_temp_free_i32(tmp);
2a0308c5
PM
10860 tcg_temp_free_i32(addr);
10861 goto illegal_op;
a2fdc890
PM
10862 }
10863 if (rs == 15) {
10864 gen_bx(s, tmp);
9ee6e8bb 10865 } else {
a2fdc890 10866 store_reg(s, rs, tmp);
9ee6e8bb
PB
10867 }
10868 } else {
10869 /* Store. */
b0109805 10870 tmp = load_reg(s, rs);
9ee6e8bb 10871 switch (op) {
5a839c0d 10872 case 0:
9bb6558a 10873 gen_aa32_st8_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10874 break;
10875 case 1:
9bb6558a 10876 gen_aa32_st16_iss(s, tmp, addr, memidx, issinfo);
5a839c0d
PM
10877 break;
10878 case 2:
9bb6558a 10879 gen_aa32_st32_iss(s, tmp, addr, memidx, issinfo);
5a839c0d 10880 break;
2a0308c5 10881 default:
5a839c0d 10882 tcg_temp_free_i32(tmp);
2a0308c5
PM
10883 tcg_temp_free_i32(addr);
10884 goto illegal_op;
b7bcbe95 10885 }
5a839c0d 10886 tcg_temp_free_i32(tmp);
2c0262af 10887 }
9ee6e8bb 10888 if (postinc)
b0109805
PB
10889 tcg_gen_addi_i32(addr, addr, imm);
10890 if (writeback) {
10891 store_reg(s, rn, addr);
10892 } else {
7d1b0095 10893 tcg_temp_free_i32(addr);
b0109805 10894 }
9ee6e8bb
PB
10895 }
10896 break;
10897 default:
10898 goto illegal_op;
2c0262af 10899 }
9ee6e8bb
PB
10900 return 0;
10901illegal_op:
10902 return 1;
2c0262af
FB
10903}
10904
0ecb72a5 10905static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10906{
10907 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10908 int32_t offset;
10909 int i;
39d5492a
PM
10910 TCGv_i32 tmp;
10911 TCGv_i32 tmp2;
10912 TCGv_i32 addr;
99c475ab 10913
9ee6e8bb
PB
10914 if (s->condexec_mask) {
10915 cond = s->condexec_cond;
bedd2912
JB
10916 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10917 s->condlabel = gen_new_label();
39fb730a 10918 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10919 s->condjmp = 1;
10920 }
9ee6e8bb
PB
10921 }
10922
f9fd40eb 10923 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
99c475ab 10924 s->pc += 2;
b5ff1b31 10925
99c475ab
FB
10926 switch (insn >> 12) {
10927 case 0: case 1:
396e467c 10928
99c475ab
FB
10929 rd = insn & 7;
10930 op = (insn >> 11) & 3;
10931 if (op == 3) {
10932 /* add/subtract */
10933 rn = (insn >> 3) & 7;
396e467c 10934 tmp = load_reg(s, rn);
99c475ab
FB
10935 if (insn & (1 << 10)) {
10936 /* immediate */
7d1b0095 10937 tmp2 = tcg_temp_new_i32();
396e467c 10938 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10939 } else {
10940 /* reg */
10941 rm = (insn >> 6) & 7;
396e467c 10942 tmp2 = load_reg(s, rm);
99c475ab 10943 }
9ee6e8bb
PB
10944 if (insn & (1 << 9)) {
10945 if (s->condexec_mask)
396e467c 10946 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10947 else
72485ec4 10948 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10949 } else {
10950 if (s->condexec_mask)
396e467c 10951 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10952 else
72485ec4 10953 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10954 }
7d1b0095 10955 tcg_temp_free_i32(tmp2);
396e467c 10956 store_reg(s, rd, tmp);
99c475ab
FB
10957 } else {
10958 /* shift immediate */
10959 rm = (insn >> 3) & 7;
10960 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10961 tmp = load_reg(s, rm);
10962 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10963 if (!s->condexec_mask)
10964 gen_logic_CC(tmp);
10965 store_reg(s, rd, tmp);
99c475ab
FB
10966 }
10967 break;
10968 case 2: case 3:
10969 /* arithmetic large immediate */
10970 op = (insn >> 11) & 3;
10971 rd = (insn >> 8) & 0x7;
396e467c 10972 if (op == 0) { /* mov */
7d1b0095 10973 tmp = tcg_temp_new_i32();
396e467c 10974 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10975 if (!s->condexec_mask)
396e467c
FN
10976 gen_logic_CC(tmp);
10977 store_reg(s, rd, tmp);
10978 } else {
10979 tmp = load_reg(s, rd);
7d1b0095 10980 tmp2 = tcg_temp_new_i32();
396e467c
FN
10981 tcg_gen_movi_i32(tmp2, insn & 0xff);
10982 switch (op) {
10983 case 1: /* cmp */
72485ec4 10984 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10985 tcg_temp_free_i32(tmp);
10986 tcg_temp_free_i32(tmp2);
396e467c
FN
10987 break;
10988 case 2: /* add */
10989 if (s->condexec_mask)
10990 tcg_gen_add_i32(tmp, tmp, tmp2);
10991 else
72485ec4 10992 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10993 tcg_temp_free_i32(tmp2);
396e467c
FN
10994 store_reg(s, rd, tmp);
10995 break;
10996 case 3: /* sub */
10997 if (s->condexec_mask)
10998 tcg_gen_sub_i32(tmp, tmp, tmp2);
10999 else
72485ec4 11000 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 11001 tcg_temp_free_i32(tmp2);
396e467c
FN
11002 store_reg(s, rd, tmp);
11003 break;
11004 }
99c475ab 11005 }
99c475ab
FB
11006 break;
11007 case 4:
11008 if (insn & (1 << 11)) {
11009 rd = (insn >> 8) & 7;
5899f386
FB
11010 /* load pc-relative. Bit 1 of PC is ignored. */
11011 val = s->pc + 2 + ((insn & 0xff) * 4);
11012 val &= ~(uint32_t)2;
7d1b0095 11013 addr = tcg_temp_new_i32();
b0109805 11014 tcg_gen_movi_i32(addr, val);
c40c8556 11015 tmp = tcg_temp_new_i32();
9bb6558a
PM
11016 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
11017 rd | ISSIs16Bit);
7d1b0095 11018 tcg_temp_free_i32(addr);
b0109805 11019 store_reg(s, rd, tmp);
99c475ab
FB
11020 break;
11021 }
11022 if (insn & (1 << 10)) {
11023 /* data processing extended or blx */
11024 rd = (insn & 7) | ((insn >> 4) & 8);
11025 rm = (insn >> 3) & 0xf;
11026 op = (insn >> 8) & 3;
11027 switch (op) {
11028 case 0: /* add */
396e467c
FN
11029 tmp = load_reg(s, rd);
11030 tmp2 = load_reg(s, rm);
11031 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 11032 tcg_temp_free_i32(tmp2);
396e467c 11033 store_reg(s, rd, tmp);
99c475ab
FB
11034 break;
11035 case 1: /* cmp */
396e467c
FN
11036 tmp = load_reg(s, rd);
11037 tmp2 = load_reg(s, rm);
72485ec4 11038 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
11039 tcg_temp_free_i32(tmp2);
11040 tcg_temp_free_i32(tmp);
99c475ab
FB
11041 break;
11042 case 2: /* mov/cpy */
396e467c
FN
11043 tmp = load_reg(s, rm);
11044 store_reg(s, rd, tmp);
99c475ab
FB
11045 break;
11046 case 3:/* branch [and link] exchange thumb register */
b0109805 11047 tmp = load_reg(s, rm);
99c475ab 11048 if (insn & (1 << 7)) {
be5e7a76 11049 ARCH(5);
99c475ab 11050 val = (uint32_t)s->pc | 1;
7d1b0095 11051 tmp2 = tcg_temp_new_i32();
b0109805
PB
11052 tcg_gen_movi_i32(tmp2, val);
11053 store_reg(s, 14, tmp2);
99c475ab 11054 }
be5e7a76 11055 /* already thumb, no need to check */
d9ba4830 11056 gen_bx(s, tmp);
99c475ab
FB
11057 break;
11058 }
11059 break;
11060 }
11061
11062 /* data processing register */
11063 rd = insn & 7;
11064 rm = (insn >> 3) & 7;
11065 op = (insn >> 6) & 0xf;
11066 if (op == 2 || op == 3 || op == 4 || op == 7) {
11067 /* the shift/rotate ops want the operands backwards */
11068 val = rm;
11069 rm = rd;
11070 rd = val;
11071 val = 1;
11072 } else {
11073 val = 0;
11074 }
11075
396e467c 11076 if (op == 9) { /* neg */
7d1b0095 11077 tmp = tcg_temp_new_i32();
396e467c
FN
11078 tcg_gen_movi_i32(tmp, 0);
11079 } else if (op != 0xf) { /* mvn doesn't read its first operand */
11080 tmp = load_reg(s, rd);
11081 } else {
39d5492a 11082 TCGV_UNUSED_I32(tmp);
396e467c 11083 }
99c475ab 11084
396e467c 11085 tmp2 = load_reg(s, rm);
5899f386 11086 switch (op) {
99c475ab 11087 case 0x0: /* and */
396e467c 11088 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 11089 if (!s->condexec_mask)
396e467c 11090 gen_logic_CC(tmp);
99c475ab
FB
11091 break;
11092 case 0x1: /* eor */
396e467c 11093 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 11094 if (!s->condexec_mask)
396e467c 11095 gen_logic_CC(tmp);
99c475ab
FB
11096 break;
11097 case 0x2: /* lsl */
9ee6e8bb 11098 if (s->condexec_mask) {
365af80e 11099 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 11100 } else {
9ef39277 11101 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11102 gen_logic_CC(tmp2);
9ee6e8bb 11103 }
99c475ab
FB
11104 break;
11105 case 0x3: /* lsr */
9ee6e8bb 11106 if (s->condexec_mask) {
365af80e 11107 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 11108 } else {
9ef39277 11109 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11110 gen_logic_CC(tmp2);
9ee6e8bb 11111 }
99c475ab
FB
11112 break;
11113 case 0x4: /* asr */
9ee6e8bb 11114 if (s->condexec_mask) {
365af80e 11115 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 11116 } else {
9ef39277 11117 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11118 gen_logic_CC(tmp2);
9ee6e8bb 11119 }
99c475ab
FB
11120 break;
11121 case 0x5: /* adc */
49b4c31e 11122 if (s->condexec_mask) {
396e467c 11123 gen_adc(tmp, tmp2);
49b4c31e
RH
11124 } else {
11125 gen_adc_CC(tmp, tmp, tmp2);
11126 }
99c475ab
FB
11127 break;
11128 case 0x6: /* sbc */
2de68a49 11129 if (s->condexec_mask) {
396e467c 11130 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
11131 } else {
11132 gen_sbc_CC(tmp, tmp, tmp2);
11133 }
99c475ab
FB
11134 break;
11135 case 0x7: /* ror */
9ee6e8bb 11136 if (s->condexec_mask) {
f669df27
AJ
11137 tcg_gen_andi_i32(tmp, tmp, 0x1f);
11138 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 11139 } else {
9ef39277 11140 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 11141 gen_logic_CC(tmp2);
9ee6e8bb 11142 }
99c475ab
FB
11143 break;
11144 case 0x8: /* tst */
396e467c
FN
11145 tcg_gen_and_i32(tmp, tmp, tmp2);
11146 gen_logic_CC(tmp);
99c475ab 11147 rd = 16;
5899f386 11148 break;
99c475ab 11149 case 0x9: /* neg */
9ee6e8bb 11150 if (s->condexec_mask)
396e467c 11151 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 11152 else
72485ec4 11153 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11154 break;
11155 case 0xa: /* cmp */
72485ec4 11156 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
11157 rd = 16;
11158 break;
11159 case 0xb: /* cmn */
72485ec4 11160 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
11161 rd = 16;
11162 break;
11163 case 0xc: /* orr */
396e467c 11164 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 11165 if (!s->condexec_mask)
396e467c 11166 gen_logic_CC(tmp);
99c475ab
FB
11167 break;
11168 case 0xd: /* mul */
7b2919a0 11169 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 11170 if (!s->condexec_mask)
396e467c 11171 gen_logic_CC(tmp);
99c475ab
FB
11172 break;
11173 case 0xe: /* bic */
f669df27 11174 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 11175 if (!s->condexec_mask)
396e467c 11176 gen_logic_CC(tmp);
99c475ab
FB
11177 break;
11178 case 0xf: /* mvn */
396e467c 11179 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 11180 if (!s->condexec_mask)
396e467c 11181 gen_logic_CC(tmp2);
99c475ab 11182 val = 1;
5899f386 11183 rm = rd;
99c475ab
FB
11184 break;
11185 }
11186 if (rd != 16) {
396e467c
FN
11187 if (val) {
11188 store_reg(s, rm, tmp2);
11189 if (op != 0xf)
7d1b0095 11190 tcg_temp_free_i32(tmp);
396e467c
FN
11191 } else {
11192 store_reg(s, rd, tmp);
7d1b0095 11193 tcg_temp_free_i32(tmp2);
396e467c
FN
11194 }
11195 } else {
7d1b0095
PM
11196 tcg_temp_free_i32(tmp);
11197 tcg_temp_free_i32(tmp2);
99c475ab
FB
11198 }
11199 break;
11200
11201 case 5:
11202 /* load/store register offset. */
11203 rd = insn & 7;
11204 rn = (insn >> 3) & 7;
11205 rm = (insn >> 6) & 7;
11206 op = (insn >> 9) & 7;
b0109805 11207 addr = load_reg(s, rn);
b26eefb6 11208 tmp = load_reg(s, rm);
b0109805 11209 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 11210 tcg_temp_free_i32(tmp);
99c475ab 11211
c40c8556 11212 if (op < 3) { /* store */
b0109805 11213 tmp = load_reg(s, rd);
c40c8556
PM
11214 } else {
11215 tmp = tcg_temp_new_i32();
11216 }
99c475ab
FB
11217
11218 switch (op) {
11219 case 0: /* str */
9bb6558a 11220 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11221 break;
11222 case 1: /* strh */
9bb6558a 11223 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11224 break;
11225 case 2: /* strb */
9bb6558a 11226 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11227 break;
11228 case 3: /* ldrsb */
9bb6558a 11229 gen_aa32_ld8s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11230 break;
11231 case 4: /* ldr */
9bb6558a 11232 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11233 break;
11234 case 5: /* ldrh */
9bb6558a 11235 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11236 break;
11237 case 6: /* ldrb */
9bb6558a 11238 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11239 break;
11240 case 7: /* ldrsh */
9bb6558a 11241 gen_aa32_ld16s_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
99c475ab
FB
11242 break;
11243 }
c40c8556 11244 if (op >= 3) { /* load */
b0109805 11245 store_reg(s, rd, tmp);
c40c8556
PM
11246 } else {
11247 tcg_temp_free_i32(tmp);
11248 }
7d1b0095 11249 tcg_temp_free_i32(addr);
99c475ab
FB
11250 break;
11251
11252 case 6:
11253 /* load/store word immediate offset */
11254 rd = insn & 7;
11255 rn = (insn >> 3) & 7;
b0109805 11256 addr = load_reg(s, rn);
99c475ab 11257 val = (insn >> 4) & 0x7c;
b0109805 11258 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11259
11260 if (insn & (1 << 11)) {
11261 /* load */
c40c8556 11262 tmp = tcg_temp_new_i32();
12dcc321 11263 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11264 store_reg(s, rd, tmp);
99c475ab
FB
11265 } else {
11266 /* store */
b0109805 11267 tmp = load_reg(s, rd);
12dcc321 11268 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11269 tcg_temp_free_i32(tmp);
99c475ab 11270 }
7d1b0095 11271 tcg_temp_free_i32(addr);
99c475ab
FB
11272 break;
11273
11274 case 7:
11275 /* load/store byte immediate offset */
11276 rd = insn & 7;
11277 rn = (insn >> 3) & 7;
b0109805 11278 addr = load_reg(s, rn);
99c475ab 11279 val = (insn >> 6) & 0x1f;
b0109805 11280 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11281
11282 if (insn & (1 << 11)) {
11283 /* load */
c40c8556 11284 tmp = tcg_temp_new_i32();
9bb6558a 11285 gen_aa32_ld8u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11286 store_reg(s, rd, tmp);
99c475ab
FB
11287 } else {
11288 /* store */
b0109805 11289 tmp = load_reg(s, rd);
9bb6558a 11290 gen_aa32_st8_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11291 tcg_temp_free_i32(tmp);
99c475ab 11292 }
7d1b0095 11293 tcg_temp_free_i32(addr);
99c475ab
FB
11294 break;
11295
11296 case 8:
11297 /* load/store halfword immediate offset */
11298 rd = insn & 7;
11299 rn = (insn >> 3) & 7;
b0109805 11300 addr = load_reg(s, rn);
99c475ab 11301 val = (insn >> 5) & 0x3e;
b0109805 11302 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11303
11304 if (insn & (1 << 11)) {
11305 /* load */
c40c8556 11306 tmp = tcg_temp_new_i32();
9bb6558a 11307 gen_aa32_ld16u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11308 store_reg(s, rd, tmp);
99c475ab
FB
11309 } else {
11310 /* store */
b0109805 11311 tmp = load_reg(s, rd);
9bb6558a 11312 gen_aa32_st16_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11313 tcg_temp_free_i32(tmp);
99c475ab 11314 }
7d1b0095 11315 tcg_temp_free_i32(addr);
99c475ab
FB
11316 break;
11317
11318 case 9:
11319 /* load/store from stack */
11320 rd = (insn >> 8) & 7;
b0109805 11321 addr = load_reg(s, 13);
99c475ab 11322 val = (insn & 0xff) * 4;
b0109805 11323 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
11324
11325 if (insn & (1 << 11)) {
11326 /* load */
c40c8556 11327 tmp = tcg_temp_new_i32();
9bb6558a 11328 gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
b0109805 11329 store_reg(s, rd, tmp);
99c475ab
FB
11330 } else {
11331 /* store */
b0109805 11332 tmp = load_reg(s, rd);
9bb6558a 11333 gen_aa32_st32_iss(s, tmp, addr, get_mem_index(s), rd | ISSIs16Bit);
c40c8556 11334 tcg_temp_free_i32(tmp);
99c475ab 11335 }
7d1b0095 11336 tcg_temp_free_i32(addr);
99c475ab
FB
11337 break;
11338
11339 case 10:
11340 /* add to high reg */
11341 rd = (insn >> 8) & 7;
5899f386
FB
11342 if (insn & (1 << 11)) {
11343 /* SP */
5e3f878a 11344 tmp = load_reg(s, 13);
5899f386
FB
11345 } else {
11346 /* PC. bit 1 is ignored. */
7d1b0095 11347 tmp = tcg_temp_new_i32();
5e3f878a 11348 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 11349 }
99c475ab 11350 val = (insn & 0xff) * 4;
5e3f878a
PB
11351 tcg_gen_addi_i32(tmp, tmp, val);
11352 store_reg(s, rd, tmp);
99c475ab
FB
11353 break;
11354
11355 case 11:
11356 /* misc */
11357 op = (insn >> 8) & 0xf;
11358 switch (op) {
11359 case 0:
11360 /* adjust stack pointer */
b26eefb6 11361 tmp = load_reg(s, 13);
99c475ab
FB
11362 val = (insn & 0x7f) * 4;
11363 if (insn & (1 << 7))
6a0d8a1d 11364 val = -(int32_t)val;
b26eefb6
PB
11365 tcg_gen_addi_i32(tmp, tmp, val);
11366 store_reg(s, 13, tmp);
99c475ab
FB
11367 break;
11368
9ee6e8bb
PB
11369 case 2: /* sign/zero extend. */
11370 ARCH(6);
11371 rd = insn & 7;
11372 rm = (insn >> 3) & 7;
b0109805 11373 tmp = load_reg(s, rm);
9ee6e8bb 11374 switch ((insn >> 6) & 3) {
b0109805
PB
11375 case 0: gen_sxth(tmp); break;
11376 case 1: gen_sxtb(tmp); break;
11377 case 2: gen_uxth(tmp); break;
11378 case 3: gen_uxtb(tmp); break;
9ee6e8bb 11379 }
b0109805 11380 store_reg(s, rd, tmp);
9ee6e8bb 11381 break;
99c475ab
FB
11382 case 4: case 5: case 0xc: case 0xd:
11383 /* push/pop */
b0109805 11384 addr = load_reg(s, 13);
5899f386
FB
11385 if (insn & (1 << 8))
11386 offset = 4;
99c475ab 11387 else
5899f386
FB
11388 offset = 0;
11389 for (i = 0; i < 8; i++) {
11390 if (insn & (1 << i))
11391 offset += 4;
11392 }
11393 if ((insn & (1 << 11)) == 0) {
b0109805 11394 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11395 }
99c475ab
FB
11396 for (i = 0; i < 8; i++) {
11397 if (insn & (1 << i)) {
11398 if (insn & (1 << 11)) {
11399 /* pop */
c40c8556 11400 tmp = tcg_temp_new_i32();
12dcc321 11401 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
b0109805 11402 store_reg(s, i, tmp);
99c475ab
FB
11403 } else {
11404 /* push */
b0109805 11405 tmp = load_reg(s, i);
12dcc321 11406 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11407 tcg_temp_free_i32(tmp);
99c475ab 11408 }
5899f386 11409 /* advance to the next address. */
b0109805 11410 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11411 }
11412 }
39d5492a 11413 TCGV_UNUSED_I32(tmp);
99c475ab
FB
11414 if (insn & (1 << 8)) {
11415 if (insn & (1 << 11)) {
11416 /* pop pc */
c40c8556 11417 tmp = tcg_temp_new_i32();
12dcc321 11418 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
99c475ab
FB
11419 /* don't set the pc until the rest of the instruction
11420 has completed */
11421 } else {
11422 /* push lr */
b0109805 11423 tmp = load_reg(s, 14);
12dcc321 11424 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11425 tcg_temp_free_i32(tmp);
99c475ab 11426 }
b0109805 11427 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 11428 }
5899f386 11429 if ((insn & (1 << 11)) == 0) {
b0109805 11430 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 11431 }
99c475ab 11432 /* write back the new stack pointer */
b0109805 11433 store_reg(s, 13, addr);
99c475ab 11434 /* set the new PC value */
be5e7a76 11435 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 11436 store_reg_from_load(s, 15, tmp);
be5e7a76 11437 }
99c475ab
FB
11438 break;
11439
9ee6e8bb
PB
11440 case 1: case 3: case 9: case 11: /* czb */
11441 rm = insn & 7;
d9ba4830 11442 tmp = load_reg(s, rm);
9ee6e8bb
PB
11443 s->condlabel = gen_new_label();
11444 s->condjmp = 1;
11445 if (insn & (1 << 11))
cb63669a 11446 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 11447 else
cb63669a 11448 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 11449 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
11450 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
11451 val = (uint32_t)s->pc + 2;
11452 val += offset;
11453 gen_jmp(s, val);
11454 break;
11455
11456 case 15: /* IT, nop-hint. */
11457 if ((insn & 0xf) == 0) {
11458 gen_nop_hint(s, (insn >> 4) & 0xf);
11459 break;
11460 }
11461 /* If Then. */
11462 s->condexec_cond = (insn >> 4) & 0xe;
11463 s->condexec_mask = insn & 0x1f;
11464 /* No actual code generated for this insn, just setup state. */
11465 break;
11466
06c949e6 11467 case 0xe: /* bkpt */
d4a2dc67
PM
11468 {
11469 int imm8 = extract32(insn, 0, 8);
be5e7a76 11470 ARCH(5);
73710361
GB
11471 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
11472 default_exception_el(s));
06c949e6 11473 break;
d4a2dc67 11474 }
06c949e6 11475
19a6e31c
PM
11476 case 0xa: /* rev, and hlt */
11477 {
11478 int op1 = extract32(insn, 6, 2);
11479
11480 if (op1 == 2) {
11481 /* HLT */
11482 int imm6 = extract32(insn, 0, 6);
11483
11484 gen_hlt(s, imm6);
11485 break;
11486 }
11487
11488 /* Otherwise this is rev */
9ee6e8bb
PB
11489 ARCH(6);
11490 rn = (insn >> 3) & 0x7;
11491 rd = insn & 0x7;
b0109805 11492 tmp = load_reg(s, rn);
19a6e31c 11493 switch (op1) {
66896cb8 11494 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
11495 case 1: gen_rev16(tmp); break;
11496 case 3: gen_revsh(tmp); break;
19a6e31c
PM
11497 default:
11498 g_assert_not_reached();
9ee6e8bb 11499 }
b0109805 11500 store_reg(s, rd, tmp);
9ee6e8bb 11501 break;
19a6e31c 11502 }
9ee6e8bb 11503
d9e028c1
PM
11504 case 6:
11505 switch ((insn >> 5) & 7) {
11506 case 2:
11507 /* setend */
11508 ARCH(6);
9886ecdf
PB
11509 if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
11510 gen_helper_setend(cpu_env);
11511 s->is_jmp = DISAS_UPDATE;
d9e028c1 11512 }
9ee6e8bb 11513 break;
d9e028c1
PM
11514 case 3:
11515 /* cps */
11516 ARCH(6);
11517 if (IS_USER(s)) {
11518 break;
8984bd2e 11519 }
b53d8923 11520 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
11521 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
11522 /* FAULTMASK */
11523 if (insn & 1) {
11524 addr = tcg_const_i32(19);
11525 gen_helper_v7m_msr(cpu_env, addr, tmp);
11526 tcg_temp_free_i32(addr);
11527 }
11528 /* PRIMASK */
11529 if (insn & 2) {
11530 addr = tcg_const_i32(16);
11531 gen_helper_v7m_msr(cpu_env, addr, tmp);
11532 tcg_temp_free_i32(addr);
11533 }
11534 tcg_temp_free_i32(tmp);
11535 gen_lookup_tb(s);
11536 } else {
11537 if (insn & (1 << 4)) {
11538 shift = CPSR_A | CPSR_I | CPSR_F;
11539 } else {
11540 shift = 0;
11541 }
11542 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 11543 }
d9e028c1
PM
11544 break;
11545 default:
11546 goto undef;
9ee6e8bb
PB
11547 }
11548 break;
11549
99c475ab
FB
11550 default:
11551 goto undef;
11552 }
11553 break;
11554
11555 case 12:
a7d3970d 11556 {
99c475ab 11557 /* load/store multiple */
39d5492a
PM
11558 TCGv_i32 loaded_var;
11559 TCGV_UNUSED_I32(loaded_var);
99c475ab 11560 rn = (insn >> 8) & 0x7;
b0109805 11561 addr = load_reg(s, rn);
99c475ab
FB
11562 for (i = 0; i < 8; i++) {
11563 if (insn & (1 << i)) {
99c475ab
FB
11564 if (insn & (1 << 11)) {
11565 /* load */
c40c8556 11566 tmp = tcg_temp_new_i32();
12dcc321 11567 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
a7d3970d
PM
11568 if (i == rn) {
11569 loaded_var = tmp;
11570 } else {
11571 store_reg(s, i, tmp);
11572 }
99c475ab
FB
11573 } else {
11574 /* store */
b0109805 11575 tmp = load_reg(s, i);
12dcc321 11576 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
c40c8556 11577 tcg_temp_free_i32(tmp);
99c475ab 11578 }
5899f386 11579 /* advance to the next address */
b0109805 11580 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
11581 }
11582 }
b0109805 11583 if ((insn & (1 << rn)) == 0) {
a7d3970d 11584 /* base reg not in list: base register writeback */
b0109805
PB
11585 store_reg(s, rn, addr);
11586 } else {
a7d3970d
PM
11587 /* base reg in list: if load, complete it now */
11588 if (insn & (1 << 11)) {
11589 store_reg(s, rn, loaded_var);
11590 }
7d1b0095 11591 tcg_temp_free_i32(addr);
b0109805 11592 }
99c475ab 11593 break;
a7d3970d 11594 }
99c475ab
FB
11595 case 13:
11596 /* conditional branch or swi */
11597 cond = (insn >> 8) & 0xf;
11598 if (cond == 0xe)
11599 goto undef;
11600
11601 if (cond == 0xf) {
11602 /* swi */
eaed129d 11603 gen_set_pc_im(s, s->pc);
d4a2dc67 11604 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 11605 s->is_jmp = DISAS_SWI;
99c475ab
FB
11606 break;
11607 }
11608 /* generate a conditional jump to next instruction */
e50e6a20 11609 s->condlabel = gen_new_label();
39fb730a 11610 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 11611 s->condjmp = 1;
99c475ab
FB
11612
11613 /* jump to the offset */
5899f386 11614 val = (uint32_t)s->pc + 2;
99c475ab 11615 offset = ((int32_t)insn << 24) >> 24;
5899f386 11616 val += offset << 1;
8aaca4c0 11617 gen_jmp(s, val);
99c475ab
FB
11618 break;
11619
11620 case 14:
358bf29e 11621 if (insn & (1 << 11)) {
9ee6e8bb
PB
11622 if (disas_thumb2_insn(env, s, insn))
11623 goto undef32;
358bf29e
PB
11624 break;
11625 }
9ee6e8bb 11626 /* unconditional branch */
99c475ab
FB
11627 val = (uint32_t)s->pc;
11628 offset = ((int32_t)insn << 21) >> 21;
11629 val += (offset << 1) + 2;
8aaca4c0 11630 gen_jmp(s, val);
99c475ab
FB
11631 break;
11632
11633 case 15:
9ee6e8bb 11634 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11635 goto undef32;
9ee6e8bb 11636 break;
99c475ab
FB
11637 }
11638 return;
9ee6e8bb 11639undef32:
73710361
GB
11640 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
11641 default_exception_el(s));
9ee6e8bb
PB
11642 return;
11643illegal_op:
99c475ab 11644undef:
73710361
GB
11645 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
11646 default_exception_el(s));
99c475ab
FB
11647}
11648
541ebcd4
PM
11649static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
11650{
11651 /* Return true if the insn at dc->pc might cross a page boundary.
11652 * (False positives are OK, false negatives are not.)
11653 */
11654 uint16_t insn;
11655
11656 if ((s->pc & 3) == 0) {
11657 /* At a 4-aligned address we can't be crossing a page */
11658 return false;
11659 }
11660
11661 /* This must be a Thumb insn */
f9fd40eb 11662 insn = arm_lduw_code(env, s->pc, s->sctlr_b);
541ebcd4
PM
11663
11664 if ((insn >> 11) >= 0x1d) {
11665 /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
11666 * First half of a 32-bit Thumb insn. Thumb-1 cores might
11667 * end up actually treating this as two 16-bit insns (see the
11668 * code at the start of disas_thumb2_insn()) but we don't bother
11669 * to check for that as it is unlikely, and false positives here
11670 * are harmless.
11671 */
11672 return true;
11673 }
11674 /* Definitely a 16-bit insn, can't be crossing a page. */
11675 return false;
11676}
11677
/* generate intermediate code for basic block 'tb'.
 *
 * Top-level A32/T32 translator loop: sets up a DisasContext from the
 * TB flags, then decodes one instruction per iteration until the TB
 * must end (branch, exception, page boundary, op buffer full,
 * single-step, or insn budget exhausted), and finally emits the
 * appropriate TB-exit code for the reason translation stopped.
 */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    target_ulong next_page_start;
    int num_insns;
    int max_insns;
    bool end_of_page;

    /* generate intermediate code */

    /* The A64 decoder has its own top level loop, because it doesn't need
     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
     */
    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
        gen_intermediate_code_a64(cpu, tb);
        return;
    }

    pc_start = tb->pc;

    dc->tb = tb;

    /* Initialise the disassembly context from the TB flags.  All of the
     * per-TB state the decoders consult lives in 'dc', not in 'env'.
     */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
    dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
    dc->is_ldex = false;
    dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */

    /* Scratch TCG temporaries shared by the VFP/Neon/iwMMXt decoders. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0.  */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }

    /* Main decode loop: one target instruction per iteration. */
    do {
        dc->insn_start_idx = tcg_op_buf_count();
        /* Record PC and condexec state so restore_state_to_opc() can
         * reconstruct them after an exception mid-TB.
         */
        tcg_gen_insn_start(dc->pc,
                           (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                           0);
        num_insns++;

#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page.  */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block.  */
            gen_exception_internal(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_EXC;
            break;
        }
#else
        if (arm_dc_feature(dc, ARM_FEATURE_M)) {
            /* Branches to the magic exception-return addresses should
             * already have been caught via the arm_v7m_unassigned_access hook,
             * and never get here.
             */
            assert(dc->pc < 0xfffffff0);
        }
#endif

        /* Emit debug-exception or cpu-breakpoint code if a breakpoint
         * is set at this PC.
         */
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            CPUBreakpoint *bp;
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    if (bp->flags & BP_CPU) {
                        gen_set_condexec(dc);
                        gen_set_pc_im(dc, dc->pc);
                        gen_helper_check_breakpoints(cpu_env);
                        /* End the TB early; it's likely not going to be executed */
                        dc->is_jmp = DISAS_UPDATE;
                    } else {
                        gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
                        /* The address covered by the breakpoint must be
                           included in [tb->pc, tb->pc + tb->size) in order
                           to for it to be properly cleared -- thus we
                           increment the PC here so that the logic setting
                           tb->size below does the right thing. */
                        /* TODO: Advance PC by correct instruction length to
                         * avoid disassembler error messages */
                        dc->pc += 2;
                        goto done_generating;
                    }
                    break;
                }
            }
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (dc->ss_active && !dc->pstate_ss) {
            /* Singlestep state is Active-pending.
             * If we're in this state at the start of a TB then either
             *  a) we just took an exception to an EL which is being debugged
             *     and this is the first insn in the exception handler
             *  b) debug exceptions were masked and we just unmasked them
             *     without changing EL (eg by clearing PSTATE.D)
             * In either case we're going to take a swstep exception in the
             * "did not step an insn" case, and so the syndrome ISV and EX
             * bits should be zero.
             */
            assert(num_insns == 1);
            gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
                          default_exception_el(dc));
            goto done_generating;
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: shift the mask and
             * fold its top bit into the condition, clearing both when
             * the block ends.
             */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
            dc->pc += 4;
            disas_arm_insn(dc, insn);
        }

        /* Close the "condition failed" branch target for a conditional
         * insn that did not itself end the TB.
         */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
                    dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */

        /* We want to stop the TB if the next insn starts in a new page,
         * or if it spans between this page and the next. This means that
         * if we're looking at the last halfword in the page we need to
         * see if it's a 16-bit Thumb insn (which will fit in this TB)
         * or a 32-bit Thumb insn (which won't).
         * This is to avoid generating a silly TB with a single 16-bit insn
         * in it at the end of this page (which would execute correctly
         * but isn't very efficient).
         */
        end_of_page = (dc->pc >= next_page_start) ||
            ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));

    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             !dc->ss_active &&
             !end_of_page &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
        /* Unconditional and "condition passed" instruction codepath. */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        default:
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                /* FIXME: Single stepping a WFI insn will not halt
                   the CPU.  */
                gen_exception_internal(EXCP_DEBUG);
            }
        }
        if (dc->condjmp) {
            /* "Condition failed" instruction codepath. */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_set_pc_im(dc, dc->pc);
            if (dc->ss_active) {
                gen_step_complete_exception(dc);
            } else {
                gen_exception_internal(EXCP_DEBUG);
            }
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        case DISAS_UPDATE:
            gen_set_pc_im(dc, dc->pc);
            /* fall through */
        case DISAS_JUMP:
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
        qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->sctlr_b << 1));
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
12066
/* Names of the 16 AArch32 PSR mode encodings, indexed by PSR bits [3:0].
 * Reserved/unassigned encodings map to "???".  Declared fully const so
 * neither the pointers nor the array can be modified and the table can
 * live in read-only storage.
 */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
    "???", "???", "hyp", "und", "???", "???", "???", "sys"
};
9ee6e8bb 12071
878096ee
AF
12072void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
12073 int flags)
2c0262af 12074{
878096ee
AF
12075 ARMCPU *cpu = ARM_CPU(cs);
12076 CPUARMState *env = &cpu->env;
2c0262af 12077 int i;
b5ff1b31 12078 uint32_t psr;
06e5cf7a 12079 const char *ns_status;
2c0262af 12080
17731115
PM
12081 if (is_a64(env)) {
12082 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
12083 return;
12084 }
12085
2c0262af 12086 for(i=0;i<16;i++) {
7fe48483 12087 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 12088 if ((i % 4) == 3)
7fe48483 12089 cpu_fprintf(f, "\n");
2c0262af 12090 else
7fe48483 12091 cpu_fprintf(f, " ");
2c0262af 12092 }
b5ff1b31 12093 psr = cpsr_read(env);
06e5cf7a
PM
12094
12095 if (arm_feature(env, ARM_FEATURE_EL3) &&
12096 (psr & CPSR_M) != ARM_CPU_MODE_MON) {
12097 ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
12098 } else {
12099 ns_status = "";
12100 }
12101
12102 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
687fa640 12103 psr,
b5ff1b31
FB
12104 psr & (1 << 31) ? 'N' : '-',
12105 psr & (1 << 30) ? 'Z' : '-',
12106 psr & (1 << 29) ? 'C' : '-',
12107 psr & (1 << 28) ? 'V' : '-',
5fafdf24 12108 psr & CPSR_T ? 'T' : 'A',
06e5cf7a 12109 ns_status,
b5ff1b31 12110 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 12111
f2617cfc
PM
12112 if (flags & CPU_DUMP_FPU) {
12113 int numvfpregs = 0;
12114 if (arm_feature(env, ARM_FEATURE_VFP)) {
12115 numvfpregs += 16;
12116 }
12117 if (arm_feature(env, ARM_FEATURE_VFP3)) {
12118 numvfpregs += 16;
12119 }
12120 for (i = 0; i < numvfpregs; i++) {
12121 uint64_t v = float64_val(env->vfp.regs[i]);
12122 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
12123 i * 2, (uint32_t)v,
12124 i * 2 + 1, (uint32_t)(v >> 32),
12125 i, v);
12126 }
12127 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 12128 }
2c0262af 12129}
a6b025d3 12130
bad729e2
RH
12131void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
12132 target_ulong *data)
d2856f1a 12133{
3926cc84 12134 if (is_a64(env)) {
bad729e2 12135 env->pc = data[0];
40f860cd 12136 env->condexec_bits = 0;
aaa1f954 12137 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12138 } else {
bad729e2
RH
12139 env->regs[15] = data[0];
12140 env->condexec_bits = data[1];
aaa1f954 12141 env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
3926cc84 12142 }
d2856f1a 12143}