]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
target-mips: Fix accumulator selection for MIPS16 and microMIPS
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
/* Architecture-feature gates: nonzero when the CPU being translated
 * implements the given architecture version/extension.
 */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

/* Bail out to the illegal-instruction path when the required
 * architecture feature is absent.
 */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)
b5ff1b31 47
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
b90372ad 56 /* Thumb-2 conditional execution bits. */
9ee6e8bb
PB
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
d8fd2954 62 int bswap_code;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb 79/* These instructions trap after executing, so defer them until after the
b90372ad 80 conditional execution state has been updated. */
9ee6e8bb
PB
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
66c374de 88static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
89static TCGv_i32 cpu_exclusive_addr;
90static TCGv_i32 cpu_exclusive_val;
91static TCGv_i32 cpu_exclusive_high;
92#ifdef CONFIG_USER_ONLY
93static TCGv_i32 cpu_exclusive_test;
94static TCGv_i32 cpu_exclusive_info;
95#endif
ad69471c 96
b26eefb6 97/* FIXME: These should be removed. */
a7812ae4
PB
98static TCGv cpu_F0s, cpu_F1s;
99static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 100
022c62cb 101#include "exec/gen-icount.h"
2e70f6ef 102
155c3eac
FN
103static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106
b26eefb6
PB
107/* initialize TCG globals. */
108void arm_translate_init(void)
109{
155c3eac
FN
110 int i;
111
a7812ae4
PB
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113
155c3eac
FN
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 116 offsetof(CPUARMState, regs[i]),
155c3eac
FN
117 regnames[i]);
118 }
66c374de
AJ
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
123
426f5abc 124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
130#ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 135#endif
155c3eac 136
a7812ae4 137#define GEN_HELPER 2
7b59220e 138#include "helper.h"
b26eefb6
PB
139}
140
d9ba4830
PB
141static inline TCGv load_cpu_offset(int offset)
142{
7d1b0095 143 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146}
147
0ecb72a5 148#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830
PB
149
150static inline void store_cpu_offset(TCGv var, int offset)
151{
152 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 153 tcg_temp_free_i32(var);
d9ba4830
PB
154}
155
156#define store_cpu_field(var, name) \
0ecb72a5 157 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 158
b26eefb6
PB
159/* Set a variable to the value of a CPU register. */
160static void load_reg_var(DisasContext *s, TCGv var, int reg)
161{
162 if (reg == 15) {
163 uint32_t addr;
b90372ad 164 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
155c3eac 171 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
172 }
173}
174
175/* Create a new temporary and set it to the value of a CPU register. */
176static inline TCGv load_reg(DisasContext *s, int reg)
177{
7d1b0095 178 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
179 load_reg_var(s, tmp, reg);
180 return tmp;
181}
182
183/* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185static void store_reg(DisasContext *s, int reg, TCGv var)
186{
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
190 }
155c3eac 191 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 192 tcg_temp_free_i32(var);
b26eefb6
PB
193}
194
b26eefb6 195/* Value extensions. */
86831435
PB
196#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
198#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
200
1497c961
PB
201#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 203
b26eefb6 204
b75263d6
JR
205static inline void gen_set_cpsr(TCGv var, uint32_t mask)
206{
207 TCGv tmp_mask = tcg_const_i32(mask);
1ce94f81 208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
209 tcg_temp_free_i32(tmp_mask);
210}
d9ba4830
PB
211/* Set NZCV flags from the high 4 bits of var. */
212#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
214static void gen_exception(int excp)
215{
7d1b0095 216 TCGv tmp = tcg_temp_new_i32();
d9ba4830 217 tcg_gen_movi_i32(tmp, excp);
1ce94f81 218 gen_helper_exception(cpu_env, tmp);
7d1b0095 219 tcg_temp_free_i32(tmp);
d9ba4830
PB
220}
221
3670669c
PB
222static void gen_smul_dual(TCGv a, TCGv b)
223{
7d1b0095
PM
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
3670669c 228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 229 tcg_temp_free_i32(tmp2);
3670669c
PB
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
7d1b0095 234 tcg_temp_free_i32(tmp1);
3670669c
PB
235}
236
237/* Byteswap each halfword. */
238static void gen_rev16(TCGv var)
239{
7d1b0095 240 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
7d1b0095 246 tcg_temp_free_i32(tmp);
3670669c
PB
247}
248
249/* Byteswap low halfword and sign extend. */
250static void gen_revsh(TCGv var)
251{
1a855029
AJ
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
3670669c
PB
255}
256
257/* Unsigned bitfield extract. */
258static void gen_ubfx(TCGv var, int shift, uint32_t mask)
259{
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
263}
264
265/* Signed bitfield extract. */
266static void gen_sbfx(TCGv var, int shift, int width)
267{
268 uint32_t signbit;
269
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
277 }
278}
279
838fa72d
AJ
280/* Return (b << 32) + a. Mark inputs as dead */
281static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 282{
838fa72d
AJ
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
284
285 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 286 tcg_temp_free_i32(b);
838fa72d
AJ
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
289
290 tcg_temp_free_i64(tmp64);
291 return a;
292}
293
294/* Return (b << 32) - a. Mark inputs as dead. */
295static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
296{
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
298
299 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 300 tcg_temp_free_i32(b);
838fa72d
AJ
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
303
304 tcg_temp_free_i64(tmp64);
305 return a;
3670669c
PB
306}
307
5e3f878a 308/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 309static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 310{
831d7fe8
RH
311 TCGv lo = tcg_temp_new_i32();
312 TCGv hi = tcg_temp_new_i32();
313 TCGv_i64 ret;
5e3f878a 314
831d7fe8 315 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 316 tcg_temp_free_i32(a);
7d1b0095 317 tcg_temp_free_i32(b);
831d7fe8
RH
318
319 ret = tcg_temp_new_i64();
320 tcg_gen_concat_i32_i64(ret, lo, hi);
321 tcg_temp_free(lo);
322 tcg_temp_free(hi);
323
324 return ret;
5e3f878a
PB
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
831d7fe8
RH
329 TCGv lo = tcg_temp_new_i32();
330 TCGv hi = tcg_temp_new_i32();
331 TCGv_i64 ret;
5e3f878a 332
831d7fe8 333 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 334 tcg_temp_free_i32(a);
7d1b0095 335 tcg_temp_free_i32(b);
831d7fe8
RH
336
337 ret = tcg_temp_new_i64();
338 tcg_gen_concat_i32_i64(ret, lo, hi);
339 tcg_temp_free(lo);
340 tcg_temp_free(hi);
341
342 return ret;
5e3f878a
PB
343}
344
8f01245e
PB
345/* Swap low and high halfwords. */
346static void gen_swap_half(TCGv var)
347{
7d1b0095 348 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
349 tcg_gen_shri_i32(tmp, var, 16);
350 tcg_gen_shli_i32(var, var, 16);
351 tcg_gen_or_i32(var, var, tmp);
7d1b0095 352 tcg_temp_free_i32(tmp);
8f01245e
PB
353}
354
b26eefb6
PB
355/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
356 tmp = (t0 ^ t1) & 0x8000;
357 t0 &= ~0x8000;
358 t1 &= ~0x8000;
359 t0 = (t0 + t1) ^ tmp;
360 */
361
362static void gen_add16(TCGv t0, TCGv t1)
363{
7d1b0095 364 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
365 tcg_gen_xor_i32(tmp, t0, t1);
366 tcg_gen_andi_i32(tmp, tmp, 0x8000);
367 tcg_gen_andi_i32(t0, t0, ~0x8000);
368 tcg_gen_andi_i32(t1, t1, ~0x8000);
369 tcg_gen_add_i32(t0, t0, t1);
370 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
371 tcg_temp_free_i32(tmp);
372 tcg_temp_free_i32(t1);
b26eefb6
PB
373}
374
375/* Set CF to the top bit of var. */
376static void gen_set_CF_bit31(TCGv var)
377{
66c374de 378 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
379}
380
381/* Set N and Z flags from var. */
382static inline void gen_logic_CC(TCGv var)
383{
66c374de
AJ
384 tcg_gen_mov_i32(cpu_NF, var);
385 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
386}
387
388/* T0 += T1 + CF. */
396e467c 389static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 390{
396e467c 391 tcg_gen_add_i32(t0, t0, t1);
66c374de 392 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
393}
394
e9bb4aa9
JR
395/* dest = T0 + T1 + CF. */
396static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
397{
e9bb4aa9 398 tcg_gen_add_i32(dest, t0, t1);
66c374de 399 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
400}
401
3670669c
PB
402/* dest = T0 - T1 + CF - 1. */
403static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
404{
3670669c 405 tcg_gen_sub_i32(dest, t0, t1);
66c374de 406 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 407 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
408}
409
72485ec4
AJ
410/* dest = T0 + T1. Compute C, N, V and Z flags */
411static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
412{
e3482cb8
RH
413 TCGv tmp = tcg_temp_new_i32();
414 tcg_gen_movi_i32(tmp, 0);
415 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 416 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 417 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
418 tcg_gen_xor_i32(tmp, t0, t1);
419 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
420 tcg_temp_free_i32(tmp);
421 tcg_gen_mov_i32(dest, cpu_NF);
422}
423
49b4c31e
RH
424/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
425static void gen_adc_CC(TCGv dest, TCGv t0, TCGv t1)
426{
427 TCGv tmp = tcg_temp_new_i32();
428 if (TCG_TARGET_HAS_add2_i32) {
429 tcg_gen_movi_i32(tmp, 0);
430 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 431 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
432 } else {
433 TCGv_i64 q0 = tcg_temp_new_i64();
434 TCGv_i64 q1 = tcg_temp_new_i64();
435 tcg_gen_extu_i32_i64(q0, t0);
436 tcg_gen_extu_i32_i64(q1, t1);
437 tcg_gen_add_i64(q0, q0, q1);
438 tcg_gen_extu_i32_i64(q1, cpu_CF);
439 tcg_gen_add_i64(q0, q0, q1);
440 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
441 tcg_temp_free_i64(q0);
442 tcg_temp_free_i64(q1);
443 }
444 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
445 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
446 tcg_gen_xor_i32(tmp, t0, t1);
447 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
448 tcg_temp_free_i32(tmp);
449 tcg_gen_mov_i32(dest, cpu_NF);
450}
451
72485ec4
AJ
452/* dest = T0 - T1. Compute C, N, V and Z flags */
453static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
454{
455 TCGv tmp;
456 tcg_gen_sub_i32(cpu_NF, t0, t1);
457 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
458 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
459 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
460 tmp = tcg_temp_new_i32();
461 tcg_gen_xor_i32(tmp, t0, t1);
462 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
463 tcg_temp_free_i32(tmp);
464 tcg_gen_mov_i32(dest, cpu_NF);
465}
466
e77f0832 467/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
2de68a49
RH
468static void gen_sbc_CC(TCGv dest, TCGv t0, TCGv t1)
469{
470 TCGv tmp = tcg_temp_new_i32();
e77f0832
RH
471 tcg_gen_not_i32(tmp, t1);
472 gen_adc_CC(dest, t0, tmp);
473 tcg_temp_free(tmp);
2de68a49
RH
474}
475
365af80e
AJ
476#define GEN_SHIFT(name) \
477static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
478{ \
479 TCGv tmp1, tmp2, tmp3; \
480 tmp1 = tcg_temp_new_i32(); \
481 tcg_gen_andi_i32(tmp1, t1, 0xff); \
482 tmp2 = tcg_const_i32(0); \
483 tmp3 = tcg_const_i32(0x1f); \
484 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
485 tcg_temp_free_i32(tmp3); \
486 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
487 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
488 tcg_temp_free_i32(tmp2); \
489 tcg_temp_free_i32(tmp1); \
490}
491GEN_SHIFT(shl)
492GEN_SHIFT(shr)
493#undef GEN_SHIFT
494
495static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
496{
497 TCGv tmp1, tmp2;
498 tmp1 = tcg_temp_new_i32();
499 tcg_gen_andi_i32(tmp1, t1, 0xff);
500 tmp2 = tcg_const_i32(0x1f);
501 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
502 tcg_temp_free_i32(tmp2);
503 tcg_gen_sar_i32(dest, t0, tmp1);
504 tcg_temp_free_i32(tmp1);
505}
506
36c91fd1
PM
507static void tcg_gen_abs_i32(TCGv dest, TCGv src)
508{
509 TCGv c0 = tcg_const_i32(0);
510 TCGv tmp = tcg_temp_new_i32();
511 tcg_gen_neg_i32(tmp, src);
512 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
513 tcg_temp_free_i32(c0);
514 tcg_temp_free_i32(tmp);
515}
ad69471c 516
9a119ff6 517static void shifter_out_im(TCGv var, int shift)
b26eefb6 518{
9a119ff6 519 if (shift == 0) {
66c374de 520 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 521 } else {
66c374de
AJ
522 tcg_gen_shri_i32(cpu_CF, var, shift);
523 if (shift != 31) {
524 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
525 }
9a119ff6 526 }
9a119ff6 527}
b26eefb6 528
9a119ff6
PB
529/* Shift by immediate. Includes special handling for shift == 0. */
530static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
531{
532 switch (shiftop) {
533 case 0: /* LSL */
534 if (shift != 0) {
535 if (flags)
536 shifter_out_im(var, 32 - shift);
537 tcg_gen_shli_i32(var, var, shift);
538 }
539 break;
540 case 1: /* LSR */
541 if (shift == 0) {
542 if (flags) {
66c374de 543 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
544 }
545 tcg_gen_movi_i32(var, 0);
546 } else {
547 if (flags)
548 shifter_out_im(var, shift - 1);
549 tcg_gen_shri_i32(var, var, shift);
550 }
551 break;
552 case 2: /* ASR */
553 if (shift == 0)
554 shift = 32;
555 if (flags)
556 shifter_out_im(var, shift - 1);
557 if (shift == 32)
558 shift = 31;
559 tcg_gen_sari_i32(var, var, shift);
560 break;
561 case 3: /* ROR/RRX */
562 if (shift != 0) {
563 if (flags)
564 shifter_out_im(var, shift - 1);
f669df27 565 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 566 } else {
66c374de 567 TCGv tmp = tcg_temp_new_i32();
b6348f29 568 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
569 if (flags)
570 shifter_out_im(var, 0);
571 tcg_gen_shri_i32(var, var, 1);
b26eefb6 572 tcg_gen_or_i32(var, var, tmp);
7d1b0095 573 tcg_temp_free_i32(tmp);
b26eefb6
PB
574 }
575 }
576};
577
8984bd2e
PB
578static inline void gen_arm_shift_reg(TCGv var, int shiftop,
579 TCGv shift, int flags)
580{
581 if (flags) {
582 switch (shiftop) {
9ef39277
BS
583 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
584 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
585 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
586 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
587 }
588 } else {
589 switch (shiftop) {
365af80e
AJ
590 case 0:
591 gen_shl(var, var, shift);
592 break;
593 case 1:
594 gen_shr(var, var, shift);
595 break;
596 case 2:
597 gen_sar(var, var, shift);
598 break;
f669df27
AJ
599 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
600 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
601 }
602 }
7d1b0095 603 tcg_temp_free_i32(shift);
8984bd2e
PB
604}
605
6ddbc6e4
PB
606#define PAS_OP(pfx) \
607 switch (op2) { \
608 case 0: gen_pas_helper(glue(pfx,add16)); break; \
609 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
610 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
611 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
612 case 4: gen_pas_helper(glue(pfx,add8)); break; \
613 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
614 }
d9ba4830 615static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 616{
a7812ae4 617 TCGv_ptr tmp;
6ddbc6e4
PB
618
619 switch (op1) {
620#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
621 case 1:
a7812ae4 622 tmp = tcg_temp_new_ptr();
0ecb72a5 623 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 624 PAS_OP(s)
b75263d6 625 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
626 break;
627 case 5:
a7812ae4 628 tmp = tcg_temp_new_ptr();
0ecb72a5 629 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 630 PAS_OP(u)
b75263d6 631 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
632 break;
633#undef gen_pas_helper
634#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
635 case 2:
636 PAS_OP(q);
637 break;
638 case 3:
639 PAS_OP(sh);
640 break;
641 case 6:
642 PAS_OP(uq);
643 break;
644 case 7:
645 PAS_OP(uh);
646 break;
647#undef gen_pas_helper
648 }
649}
9ee6e8bb
PB
650#undef PAS_OP
651
6ddbc6e4
PB
652/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
653#define PAS_OP(pfx) \
ed89a2f1 654 switch (op1) { \
6ddbc6e4
PB
655 case 0: gen_pas_helper(glue(pfx,add8)); break; \
656 case 1: gen_pas_helper(glue(pfx,add16)); break; \
657 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
658 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
659 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
660 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
661 }
d9ba4830 662static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 663{
a7812ae4 664 TCGv_ptr tmp;
6ddbc6e4 665
ed89a2f1 666 switch (op2) {
6ddbc6e4
PB
667#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
668 case 0:
a7812ae4 669 tmp = tcg_temp_new_ptr();
0ecb72a5 670 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 671 PAS_OP(s)
b75263d6 672 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
673 break;
674 case 4:
a7812ae4 675 tmp = tcg_temp_new_ptr();
0ecb72a5 676 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 677 PAS_OP(u)
b75263d6 678 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
679 break;
680#undef gen_pas_helper
681#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
682 case 1:
683 PAS_OP(q);
684 break;
685 case 2:
686 PAS_OP(sh);
687 break;
688 case 5:
689 PAS_OP(uq);
690 break;
691 case 6:
692 PAS_OP(uh);
693 break;
694#undef gen_pas_helper
695 }
696}
9ee6e8bb
PB
697#undef PAS_OP
698
d9ba4830
PB
699static void gen_test_cc(int cc, int label)
700{
701 TCGv tmp;
d9ba4830
PB
702 int inv;
703
d9ba4830
PB
704 switch (cc) {
705 case 0: /* eq: Z */
66c374de 706 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
707 break;
708 case 1: /* ne: !Z */
66c374de 709 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
710 break;
711 case 2: /* cs: C */
66c374de 712 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
713 break;
714 case 3: /* cc: !C */
66c374de 715 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
716 break;
717 case 4: /* mi: N */
66c374de 718 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
719 break;
720 case 5: /* pl: !N */
66c374de 721 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
722 break;
723 case 6: /* vs: V */
66c374de 724 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
725 break;
726 case 7: /* vc: !V */
66c374de 727 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
728 break;
729 case 8: /* hi: C && !Z */
730 inv = gen_new_label();
66c374de
AJ
731 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
732 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
733 gen_set_label(inv);
734 break;
735 case 9: /* ls: !C || Z */
66c374de
AJ
736 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
737 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
738 break;
739 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
740 tmp = tcg_temp_new_i32();
741 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 742 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 743 tcg_temp_free_i32(tmp);
d9ba4830
PB
744 break;
745 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
746 tmp = tcg_temp_new_i32();
747 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 748 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 749 tcg_temp_free_i32(tmp);
d9ba4830
PB
750 break;
751 case 12: /* gt: !Z && N == V */
752 inv = gen_new_label();
66c374de
AJ
753 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
754 tmp = tcg_temp_new_i32();
755 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 756 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 757 tcg_temp_free_i32(tmp);
d9ba4830
PB
758 gen_set_label(inv);
759 break;
760 case 13: /* le: Z || N != V */
66c374de
AJ
761 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
762 tmp = tcg_temp_new_i32();
763 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 764 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 765 tcg_temp_free_i32(tmp);
d9ba4830
PB
766 break;
767 default:
768 fprintf(stderr, "Bad condition code 0x%x\n", cc);
769 abort();
770 }
d9ba4830 771}
2c0262af 772
b1d8e52e 773static const uint8_t table_logic_cc[16] = {
2c0262af
FB
774 1, /* and */
775 1, /* xor */
776 0, /* sub */
777 0, /* rsb */
778 0, /* add */
779 0, /* adc */
780 0, /* sbc */
781 0, /* rsc */
782 1, /* andl */
783 1, /* xorl */
784 0, /* cmp */
785 0, /* cmn */
786 1, /* orr */
787 1, /* mov */
788 1, /* bic */
789 1, /* mvn */
790};
3b46e624 791
d9ba4830
PB
792/* Set PC and Thumb state from an immediate address. */
793static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 794{
b26eefb6 795 TCGv tmp;
99c475ab 796
b26eefb6 797 s->is_jmp = DISAS_UPDATE;
d9ba4830 798 if (s->thumb != (addr & 1)) {
7d1b0095 799 tmp = tcg_temp_new_i32();
d9ba4830 800 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 801 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 802 tcg_temp_free_i32(tmp);
d9ba4830 803 }
155c3eac 804 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
805}
806
807/* Set PC and Thumb state from var. var is marked as dead. */
808static inline void gen_bx(DisasContext *s, TCGv var)
809{
d9ba4830 810 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
811 tcg_gen_andi_i32(cpu_R[15], var, ~1);
812 tcg_gen_andi_i32(var, var, 1);
813 store_cpu_field(var, thumb);
d9ba4830
PB
814}
815
21aeb343
JR
816/* Variant of store_reg which uses branch&exchange logic when storing
817 to r15 in ARM architecture v7 and above. The source must be a temporary
818 and will be marked as dead. */
0ecb72a5 819static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
21aeb343
JR
820 int reg, TCGv var)
821{
822 if (reg == 15 && ENABLE_ARCH_7) {
823 gen_bx(s, var);
824 } else {
825 store_reg(s, reg, var);
826 }
827}
828
be5e7a76
DES
829/* Variant of store_reg which uses branch&exchange logic when storing
830 * to r15 in ARM architecture v5T and above. This is used for storing
831 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
832 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 833static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
be5e7a76
DES
834 int reg, TCGv var)
835{
836 if (reg == 15 && ENABLE_ARCH_5) {
837 gen_bx(s, var);
838 } else {
839 store_reg(s, reg, var);
840 }
841}
842
b0109805
PB
843static inline TCGv gen_ld8s(TCGv addr, int index)
844{
7d1b0095 845 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
846 tcg_gen_qemu_ld8s(tmp, addr, index);
847 return tmp;
848}
849static inline TCGv gen_ld8u(TCGv addr, int index)
850{
7d1b0095 851 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
852 tcg_gen_qemu_ld8u(tmp, addr, index);
853 return tmp;
854}
855static inline TCGv gen_ld16s(TCGv addr, int index)
856{
7d1b0095 857 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
858 tcg_gen_qemu_ld16s(tmp, addr, index);
859 return tmp;
860}
861static inline TCGv gen_ld16u(TCGv addr, int index)
862{
7d1b0095 863 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
864 tcg_gen_qemu_ld16u(tmp, addr, index);
865 return tmp;
866}
867static inline TCGv gen_ld32(TCGv addr, int index)
868{
7d1b0095 869 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
870 tcg_gen_qemu_ld32u(tmp, addr, index);
871 return tmp;
872}
84496233
JR
873static inline TCGv_i64 gen_ld64(TCGv addr, int index)
874{
875 TCGv_i64 tmp = tcg_temp_new_i64();
876 tcg_gen_qemu_ld64(tmp, addr, index);
877 return tmp;
878}
b0109805
PB
879static inline void gen_st8(TCGv val, TCGv addr, int index)
880{
881 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 882 tcg_temp_free_i32(val);
b0109805
PB
883}
884static inline void gen_st16(TCGv val, TCGv addr, int index)
885{
886 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 887 tcg_temp_free_i32(val);
b0109805
PB
888}
889static inline void gen_st32(TCGv val, TCGv addr, int index)
890{
891 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 892 tcg_temp_free_i32(val);
b0109805 893}
84496233
JR
894static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
895{
896 tcg_gen_qemu_st64(val, addr, index);
897 tcg_temp_free_i64(val);
898}
b5ff1b31 899
5e3f878a
PB
900static inline void gen_set_pc_im(uint32_t val)
901{
155c3eac 902 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
903}
904
b5ff1b31
FB
905/* Force a TB lookup after an instruction that changes the CPU state. */
906static inline void gen_lookup_tb(DisasContext *s)
907{
a6445c52 908 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
909 s->is_jmp = DISAS_UPDATE;
910}
911
b0109805
PB
912static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
913 TCGv var)
2c0262af 914{
1e8d4eec 915 int val, rm, shift, shiftop;
b26eefb6 916 TCGv offset;
2c0262af
FB
917
918 if (!(insn & (1 << 25))) {
919 /* immediate */
920 val = insn & 0xfff;
921 if (!(insn & (1 << 23)))
922 val = -val;
537730b9 923 if (val != 0)
b0109805 924 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
925 } else {
926 /* shift/register */
927 rm = (insn) & 0xf;
928 shift = (insn >> 7) & 0x1f;
1e8d4eec 929 shiftop = (insn >> 5) & 3;
b26eefb6 930 offset = load_reg(s, rm);
9a119ff6 931 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 932 if (!(insn & (1 << 23)))
b0109805 933 tcg_gen_sub_i32(var, var, offset);
2c0262af 934 else
b0109805 935 tcg_gen_add_i32(var, var, offset);
7d1b0095 936 tcg_temp_free_i32(offset);
2c0262af
FB
937 }
938}
939
191f9a93 940static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 941 int extra, TCGv var)
2c0262af
FB
942{
943 int val, rm;
b26eefb6 944 TCGv offset;
3b46e624 945
2c0262af
FB
946 if (insn & (1 << 22)) {
947 /* immediate */
948 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
949 if (!(insn & (1 << 23)))
950 val = -val;
18acad92 951 val += extra;
537730b9 952 if (val != 0)
b0109805 953 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
954 } else {
955 /* register */
191f9a93 956 if (extra)
b0109805 957 tcg_gen_addi_i32(var, var, extra);
2c0262af 958 rm = (insn) & 0xf;
b26eefb6 959 offset = load_reg(s, rm);
2c0262af 960 if (!(insn & (1 << 23)))
b0109805 961 tcg_gen_sub_i32(var, var, offset);
2c0262af 962 else
b0109805 963 tcg_gen_add_i32(var, var, offset);
7d1b0095 964 tcg_temp_free_i32(offset);
2c0262af
FB
965 }
966}
967
5aaebd13
PM
968static TCGv_ptr get_fpstatus_ptr(int neon)
969{
970 TCGv_ptr statusptr = tcg_temp_new_ptr();
971 int offset;
972 if (neon) {
0ecb72a5 973 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 974 } else {
0ecb72a5 975 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
976 }
977 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
978 return statusptr;
979}
980
4373f3ce
PB
981#define VFP_OP2(name) \
982static inline void gen_vfp_##name(int dp) \
983{ \
ae1857ec
PM
984 TCGv_ptr fpst = get_fpstatus_ptr(0); \
985 if (dp) { \
986 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
987 } else { \
988 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
989 } \
990 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
991}
992
4373f3ce
PB
993VFP_OP2(add)
994VFP_OP2(sub)
995VFP_OP2(mul)
996VFP_OP2(div)
997
998#undef VFP_OP2
999
605a6aed
PM
1000static inline void gen_vfp_F1_mul(int dp)
1001{
1002 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1003 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1004 if (dp) {
ae1857ec 1005 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1006 } else {
ae1857ec 1007 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1008 }
ae1857ec 1009 tcg_temp_free_ptr(fpst);
605a6aed
PM
1010}
1011
1012static inline void gen_vfp_F1_neg(int dp)
1013{
1014 /* Like gen_vfp_neg() but put result in F1 */
1015 if (dp) {
1016 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1017 } else {
1018 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1019 }
1020}
1021
4373f3ce
PB
1022static inline void gen_vfp_abs(int dp)
1023{
1024 if (dp)
1025 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1026 else
1027 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1028}
1029
1030static inline void gen_vfp_neg(int dp)
1031{
1032 if (dp)
1033 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1034 else
1035 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1036}
1037
1038static inline void gen_vfp_sqrt(int dp)
1039{
1040 if (dp)
1041 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1042 else
1043 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1044}
1045
1046static inline void gen_vfp_cmp(int dp)
1047{
1048 if (dp)
1049 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1050 else
1051 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1052}
1053
1054static inline void gen_vfp_cmpe(int dp)
1055{
1056 if (dp)
1057 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1058 else
1059 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1060}
1061
1062static inline void gen_vfp_F1_ld0(int dp)
1063{
1064 if (dp)
5b340b51 1065 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1066 else
5b340b51 1067 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1068}
1069
5500b06c
PM
1070#define VFP_GEN_ITOF(name) \
1071static inline void gen_vfp_##name(int dp, int neon) \
1072{ \
5aaebd13 1073 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1074 if (dp) { \
1075 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1076 } else { \
1077 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1078 } \
b7fa9214 1079 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1080}
1081
5500b06c
PM
1082VFP_GEN_ITOF(uito)
1083VFP_GEN_ITOF(sito)
1084#undef VFP_GEN_ITOF
4373f3ce 1085
5500b06c
PM
1086#define VFP_GEN_FTOI(name) \
1087static inline void gen_vfp_##name(int dp, int neon) \
1088{ \
5aaebd13 1089 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1090 if (dp) { \
1091 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1092 } else { \
1093 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1094 } \
b7fa9214 1095 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1096}
1097
5500b06c
PM
1098VFP_GEN_FTOI(toui)
1099VFP_GEN_FTOI(touiz)
1100VFP_GEN_FTOI(tosi)
1101VFP_GEN_FTOI(tosiz)
1102#undef VFP_GEN_FTOI
4373f3ce
PB
1103
1104#define VFP_GEN_FIX(name) \
5500b06c 1105static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1106{ \
b75263d6 1107 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1108 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1109 if (dp) { \
1110 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1111 } else { \
1112 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1113 } \
b75263d6 1114 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1115 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1116}
4373f3ce
PB
1117VFP_GEN_FIX(tosh)
1118VFP_GEN_FIX(tosl)
1119VFP_GEN_FIX(touh)
1120VFP_GEN_FIX(toul)
1121VFP_GEN_FIX(shto)
1122VFP_GEN_FIX(slto)
1123VFP_GEN_FIX(uhto)
1124VFP_GEN_FIX(ulto)
1125#undef VFP_GEN_FIX
9ee6e8bb 1126
312eea9f 1127static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1128{
1129 if (dp)
312eea9f 1130 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1131 else
312eea9f 1132 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1133}
1134
312eea9f 1135static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1136{
1137 if (dp)
312eea9f 1138 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1139 else
312eea9f 1140 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1141}
1142
8e96005d
FB
1143static inline long
1144vfp_reg_offset (int dp, int reg)
1145{
1146 if (dp)
1147 return offsetof(CPUARMState, vfp.regs[reg]);
1148 else if (reg & 1) {
1149 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1150 + offsetof(CPU_DoubleU, l.upper);
1151 } else {
1152 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1153 + offsetof(CPU_DoubleU, l.lower);
1154 }
1155}
9ee6e8bb
PB
1156
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each D register is two 32-bit S-register slots. */
    return vfp_reg_offset(0, reg * 2 + n);
}
1166
8f8e3aa4
PB
1167static TCGv neon_load_reg(int reg, int pass)
1168{
7d1b0095 1169 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1170 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1171 return tmp;
1172}
1173
1174static void neon_store_reg(int reg, int pass, TCGv var)
1175{
1176 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1177 tcg_temp_free_i32(var);
8f8e3aa4
PB
1178}
1179
a7812ae4 1180static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1181{
1182 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1183}
1184
a7812ae4 1185static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1186{
1187 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1188}
1189
/* VFP values are kept as raw i32/i64 bit patterns, so the "float"
 * load/store ops are simple aliases of the integer ones. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
b7bcbe95
FB
1195static inline void gen_mov_F0_vreg(int dp, int reg)
1196{
1197 if (dp)
4373f3ce 1198 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1199 else
4373f3ce 1200 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1201}
1202
1203static inline void gen_mov_F1_vreg(int dp, int reg)
1204{
1205 if (dp)
4373f3ce 1206 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1207 else
4373f3ce 1208 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1209}
1210
1211static inline void gen_mov_vreg_F0(int dp, int reg)
1212{
1213 if (dp)
4373f3ce 1214 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1215 else
4373f3ce 1216 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1217}
1218
/* Coprocessor-instruction direction bit: set when the insn transfers
 * from the coprocessor to the core (used below to distinguish the
 * read/load forms, e.g. TMRRC/WLDR, from TMCRR/WSTR). */
#define ARM_CP_RW_BIT (1 << 20)
a7812ae4 1221static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1222{
0ecb72a5 1223 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1224}
1225
a7812ae4 1226static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1227{
0ecb72a5 1228 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1229}
1230
da6b5335 1231static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1232{
7d1b0095 1233 TCGv var = tcg_temp_new_i32();
0ecb72a5 1234 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1235 return var;
e677137d
PB
1236}
1237
da6b5335 1238static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1239{
0ecb72a5 1240 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1241 tcg_temp_free_i32(var);
e677137d
PB
1242}
1243
1244static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1245{
1246 iwmmxt_store_reg(cpu_M0, rn);
1247}
1248
1249static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1250{
1251 iwmmxt_load_reg(cpu_M0, rn);
1252}
1253
1254static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1255{
1256 iwmmxt_load_reg(cpu_V1, rn);
1257 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1258}
1259
1260static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1261{
1262 iwmmxt_load_reg(cpu_V1, rn);
1263 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1264}
1265
1266static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1267{
1268 iwmmxt_load_reg(cpu_V1, rn);
1269 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1270}
1271
/* Two-operand iwMMXt op whose helper needs no CPU state:
 * M0 := helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
1278
/* As IWMMXT_OP, but for helpers that also take cpu_env:
 * M0 := helper(env, M0, wRn). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)
e677137d 1290
/* One-operand env-taking op: M0 := helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1296
/* Multiply / multiply-accumulate / sum-of-absolute-differences. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Interleave (unpack) low/high halves with a second register. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* One-operand unpack with sign/zero extension. */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise comparisons. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract variants. */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averages. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack with saturation. */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1353
e677137d
PB
1354static void gen_op_iwmmxt_set_mup(void)
1355{
1356 TCGv tmp;
1357 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1358 tcg_gen_ori_i32(tmp, tmp, 2);
1359 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1360}
1361
1362static void gen_op_iwmmxt_set_cup(void)
1363{
1364 TCGv tmp;
1365 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1366 tcg_gen_ori_i32(tmp, tmp, 1);
1367 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1368}
1369
1370static void gen_op_iwmmxt_setpsr_nz(void)
1371{
7d1b0095 1372 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1373 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1374 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1375}
1376
1377static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1378{
1379 iwmmxt_load_reg(cpu_V1, rn);
86831435 1380 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1381 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1382}
1383
da6b5335 1384static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1385{
1386 int rd;
1387 uint32_t offset;
da6b5335 1388 TCGv tmp;
18c9b560
AZ
1389
1390 rd = (insn >> 16) & 0xf;
da6b5335 1391 tmp = load_reg(s, rd);
18c9b560
AZ
1392
1393 offset = (insn & 0xff) << ((insn >> 7) & 2);
1394 if (insn & (1 << 24)) {
1395 /* Pre indexed */
1396 if (insn & (1 << 23))
da6b5335 1397 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1398 else
da6b5335
FN
1399 tcg_gen_addi_i32(tmp, tmp, -offset);
1400 tcg_gen_mov_i32(dest, tmp);
18c9b560 1401 if (insn & (1 << 21))
da6b5335
FN
1402 store_reg(s, rd, tmp);
1403 else
7d1b0095 1404 tcg_temp_free_i32(tmp);
18c9b560
AZ
1405 } else if (insn & (1 << 21)) {
1406 /* Post indexed */
da6b5335 1407 tcg_gen_mov_i32(dest, tmp);
18c9b560 1408 if (insn & (1 << 23))
da6b5335 1409 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1410 else
da6b5335
FN
1411 tcg_gen_addi_i32(tmp, tmp, -offset);
1412 store_reg(s, rd, tmp);
18c9b560
AZ
1413 } else if (!(insn & (1 << 23)))
1414 return 1;
1415 return 0;
1416}
1417
da6b5335 1418static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1419{
1420 int rd = (insn >> 0) & 0xf;
da6b5335 1421 TCGv tmp;
18c9b560 1422
da6b5335
FN
1423 if (insn & (1 << 8)) {
1424 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1425 return 1;
da6b5335
FN
1426 } else {
1427 tmp = iwmmxt_load_creg(rd);
1428 }
1429 } else {
7d1b0095 1430 tmp = tcg_temp_new_i32();
da6b5335
FN
1431 iwmmxt_load_reg(cpu_V0, rd);
1432 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1433 }
1434 tcg_gen_andi_i32(tmp, tmp, mask);
1435 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1436 tcg_temp_free_i32(tmp);
18c9b560
AZ
1437 return 0;
1438}
1439
a1c7273b 1440/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1441 (ie. an undefined instruction). */
0ecb72a5 1442static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1443{
1444 int rd, wrd;
1445 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1446 TCGv addr;
1447 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1448
1449 if ((insn & 0x0e000e00) == 0x0c000000) {
1450 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1451 wrd = insn & 0xf;
1452 rdlo = (insn >> 12) & 0xf;
1453 rdhi = (insn >> 16) & 0xf;
1454 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1455 iwmmxt_load_reg(cpu_V0, wrd);
1456 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1457 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1458 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1459 } else { /* TMCRR */
da6b5335
FN
1460 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1461 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1462 gen_op_iwmmxt_set_mup();
1463 }
1464 return 0;
1465 }
1466
1467 wrd = (insn >> 12) & 0xf;
7d1b0095 1468 addr = tcg_temp_new_i32();
da6b5335 1469 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1470 tcg_temp_free_i32(addr);
18c9b560 1471 return 1;
da6b5335 1472 }
18c9b560
AZ
1473 if (insn & ARM_CP_RW_BIT) {
1474 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1475 tmp = tcg_temp_new_i32();
da6b5335
FN
1476 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1477 iwmmxt_store_creg(wrd, tmp);
18c9b560 1478 } else {
e677137d
PB
1479 i = 1;
1480 if (insn & (1 << 8)) {
1481 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1482 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1483 i = 0;
1484 } else { /* WLDRW wRd */
da6b5335 1485 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1486 }
1487 } else {
1488 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1489 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1490 } else { /* WLDRB */
da6b5335 1491 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1492 }
1493 }
1494 if (i) {
1495 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1496 tcg_temp_free_i32(tmp);
e677137d 1497 }
18c9b560
AZ
1498 gen_op_iwmmxt_movq_wRn_M0(wrd);
1499 }
1500 } else {
1501 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1502 tmp = iwmmxt_load_creg(wrd);
1503 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1504 } else {
1505 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1506 tmp = tcg_temp_new_i32();
e677137d
PB
1507 if (insn & (1 << 8)) {
1508 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1509 tcg_temp_free_i32(tmp);
da6b5335 1510 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1511 } else { /* WSTRW wRd */
1512 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1513 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1514 }
1515 } else {
1516 if (insn & (1 << 22)) { /* WSTRH */
1517 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1518 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1519 } else { /* WSTRB */
1520 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1521 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1522 }
1523 }
18c9b560
AZ
1524 }
1525 }
7d1b0095 1526 tcg_temp_free_i32(addr);
18c9b560
AZ
1527 return 0;
1528 }
1529
1530 if ((insn & 0x0f000000) != 0x0e000000)
1531 return 1;
1532
1533 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1534 case 0x000: /* WOR */
1535 wrd = (insn >> 12) & 0xf;
1536 rd0 = (insn >> 0) & 0xf;
1537 rd1 = (insn >> 16) & 0xf;
1538 gen_op_iwmmxt_movq_M0_wRn(rd0);
1539 gen_op_iwmmxt_orq_M0_wRn(rd1);
1540 gen_op_iwmmxt_setpsr_nz();
1541 gen_op_iwmmxt_movq_wRn_M0(wrd);
1542 gen_op_iwmmxt_set_mup();
1543 gen_op_iwmmxt_set_cup();
1544 break;
1545 case 0x011: /* TMCR */
1546 if (insn & 0xf)
1547 return 1;
1548 rd = (insn >> 12) & 0xf;
1549 wrd = (insn >> 16) & 0xf;
1550 switch (wrd) {
1551 case ARM_IWMMXT_wCID:
1552 case ARM_IWMMXT_wCASF:
1553 break;
1554 case ARM_IWMMXT_wCon:
1555 gen_op_iwmmxt_set_cup();
1556 /* Fall through. */
1557 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1558 tmp = iwmmxt_load_creg(wrd);
1559 tmp2 = load_reg(s, rd);
f669df27 1560 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1561 tcg_temp_free_i32(tmp2);
da6b5335 1562 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1563 break;
1564 case ARM_IWMMXT_wCGR0:
1565 case ARM_IWMMXT_wCGR1:
1566 case ARM_IWMMXT_wCGR2:
1567 case ARM_IWMMXT_wCGR3:
1568 gen_op_iwmmxt_set_cup();
da6b5335
FN
1569 tmp = load_reg(s, rd);
1570 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1571 break;
1572 default:
1573 return 1;
1574 }
1575 break;
1576 case 0x100: /* WXOR */
1577 wrd = (insn >> 12) & 0xf;
1578 rd0 = (insn >> 0) & 0xf;
1579 rd1 = (insn >> 16) & 0xf;
1580 gen_op_iwmmxt_movq_M0_wRn(rd0);
1581 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1582 gen_op_iwmmxt_setpsr_nz();
1583 gen_op_iwmmxt_movq_wRn_M0(wrd);
1584 gen_op_iwmmxt_set_mup();
1585 gen_op_iwmmxt_set_cup();
1586 break;
1587 case 0x111: /* TMRC */
1588 if (insn & 0xf)
1589 return 1;
1590 rd = (insn >> 12) & 0xf;
1591 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1592 tmp = iwmmxt_load_creg(wrd);
1593 store_reg(s, rd, tmp);
18c9b560
AZ
1594 break;
1595 case 0x300: /* WANDN */
1596 wrd = (insn >> 12) & 0xf;
1597 rd0 = (insn >> 0) & 0xf;
1598 rd1 = (insn >> 16) & 0xf;
1599 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1600 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1601 gen_op_iwmmxt_andq_M0_wRn(rd1);
1602 gen_op_iwmmxt_setpsr_nz();
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 gen_op_iwmmxt_set_cup();
1606 break;
1607 case 0x200: /* WAND */
1608 wrd = (insn >> 12) & 0xf;
1609 rd0 = (insn >> 0) & 0xf;
1610 rd1 = (insn >> 16) & 0xf;
1611 gen_op_iwmmxt_movq_M0_wRn(rd0);
1612 gen_op_iwmmxt_andq_M0_wRn(rd1);
1613 gen_op_iwmmxt_setpsr_nz();
1614 gen_op_iwmmxt_movq_wRn_M0(wrd);
1615 gen_op_iwmmxt_set_mup();
1616 gen_op_iwmmxt_set_cup();
1617 break;
1618 case 0x810: case 0xa10: /* WMADD */
1619 wrd = (insn >> 12) & 0xf;
1620 rd0 = (insn >> 0) & 0xf;
1621 rd1 = (insn >> 16) & 0xf;
1622 gen_op_iwmmxt_movq_M0_wRn(rd0);
1623 if (insn & (1 << 21))
1624 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1625 else
1626 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1627 gen_op_iwmmxt_movq_wRn_M0(wrd);
1628 gen_op_iwmmxt_set_mup();
1629 break;
1630 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1631 wrd = (insn >> 12) & 0xf;
1632 rd0 = (insn >> 16) & 0xf;
1633 rd1 = (insn >> 0) & 0xf;
1634 gen_op_iwmmxt_movq_M0_wRn(rd0);
1635 switch ((insn >> 22) & 3) {
1636 case 0:
1637 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1638 break;
1639 case 1:
1640 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1641 break;
1642 case 2:
1643 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1644 break;
1645 case 3:
1646 return 1;
1647 }
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1650 gen_op_iwmmxt_set_cup();
1651 break;
1652 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1653 wrd = (insn >> 12) & 0xf;
1654 rd0 = (insn >> 16) & 0xf;
1655 rd1 = (insn >> 0) & 0xf;
1656 gen_op_iwmmxt_movq_M0_wRn(rd0);
1657 switch ((insn >> 22) & 3) {
1658 case 0:
1659 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1660 break;
1661 case 1:
1662 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1663 break;
1664 case 2:
1665 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1666 break;
1667 case 3:
1668 return 1;
1669 }
1670 gen_op_iwmmxt_movq_wRn_M0(wrd);
1671 gen_op_iwmmxt_set_mup();
1672 gen_op_iwmmxt_set_cup();
1673 break;
1674 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1675 wrd = (insn >> 12) & 0xf;
1676 rd0 = (insn >> 16) & 0xf;
1677 rd1 = (insn >> 0) & 0xf;
1678 gen_op_iwmmxt_movq_M0_wRn(rd0);
1679 if (insn & (1 << 22))
1680 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1681 else
1682 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1683 if (!(insn & (1 << 20)))
1684 gen_op_iwmmxt_addl_M0_wRn(wrd);
1685 gen_op_iwmmxt_movq_wRn_M0(wrd);
1686 gen_op_iwmmxt_set_mup();
1687 break;
1688 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1689 wrd = (insn >> 12) & 0xf;
1690 rd0 = (insn >> 16) & 0xf;
1691 rd1 = (insn >> 0) & 0xf;
1692 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1693 if (insn & (1 << 21)) {
1694 if (insn & (1 << 20))
1695 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1696 else
1697 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1698 } else {
1699 if (insn & (1 << 20))
1700 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1701 else
1702 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1703 }
18c9b560
AZ
1704 gen_op_iwmmxt_movq_wRn_M0(wrd);
1705 gen_op_iwmmxt_set_mup();
1706 break;
1707 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1708 wrd = (insn >> 12) & 0xf;
1709 rd0 = (insn >> 16) & 0xf;
1710 rd1 = (insn >> 0) & 0xf;
1711 gen_op_iwmmxt_movq_M0_wRn(rd0);
1712 if (insn & (1 << 21))
1713 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1714 else
1715 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1716 if (!(insn & (1 << 20))) {
e677137d
PB
1717 iwmmxt_load_reg(cpu_V1, wrd);
1718 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1719 }
1720 gen_op_iwmmxt_movq_wRn_M0(wrd);
1721 gen_op_iwmmxt_set_mup();
1722 break;
1723 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
1728 switch ((insn >> 22) & 3) {
1729 case 0:
1730 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1731 break;
1732 case 1:
1733 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1734 break;
1735 case 2:
1736 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1737 break;
1738 case 3:
1739 return 1;
1740 }
1741 gen_op_iwmmxt_movq_wRn_M0(wrd);
1742 gen_op_iwmmxt_set_mup();
1743 gen_op_iwmmxt_set_cup();
1744 break;
1745 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1746 wrd = (insn >> 12) & 0xf;
1747 rd0 = (insn >> 16) & 0xf;
1748 rd1 = (insn >> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1750 if (insn & (1 << 22)) {
1751 if (insn & (1 << 20))
1752 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1753 else
1754 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1755 } else {
1756 if (insn & (1 << 20))
1757 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1758 else
1759 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1760 }
18c9b560
AZ
1761 gen_op_iwmmxt_movq_wRn_M0(wrd);
1762 gen_op_iwmmxt_set_mup();
1763 gen_op_iwmmxt_set_cup();
1764 break;
1765 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1766 wrd = (insn >> 12) & 0xf;
1767 rd0 = (insn >> 16) & 0xf;
1768 rd1 = (insn >> 0) & 0xf;
1769 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1770 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1771 tcg_gen_andi_i32(tmp, tmp, 7);
1772 iwmmxt_load_reg(cpu_V1, rd1);
1773 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1774 tcg_temp_free_i32(tmp);
18c9b560
AZ
1775 gen_op_iwmmxt_movq_wRn_M0(wrd);
1776 gen_op_iwmmxt_set_mup();
1777 break;
1778 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1779 if (((insn >> 6) & 3) == 3)
1780 return 1;
18c9b560
AZ
1781 rd = (insn >> 12) & 0xf;
1782 wrd = (insn >> 16) & 0xf;
da6b5335 1783 tmp = load_reg(s, rd);
18c9b560
AZ
1784 gen_op_iwmmxt_movq_M0_wRn(wrd);
1785 switch ((insn >> 6) & 3) {
1786 case 0:
da6b5335
FN
1787 tmp2 = tcg_const_i32(0xff);
1788 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1789 break;
1790 case 1:
da6b5335
FN
1791 tmp2 = tcg_const_i32(0xffff);
1792 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1793 break;
1794 case 2:
da6b5335
FN
1795 tmp2 = tcg_const_i32(0xffffffff);
1796 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1797 break;
da6b5335
FN
1798 default:
1799 TCGV_UNUSED(tmp2);
1800 TCGV_UNUSED(tmp3);
18c9b560 1801 }
da6b5335
FN
1802 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1803 tcg_temp_free(tmp3);
1804 tcg_temp_free(tmp2);
7d1b0095 1805 tcg_temp_free_i32(tmp);
18c9b560
AZ
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 break;
1809 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1810 rd = (insn >> 12) & 0xf;
1811 wrd = (insn >> 16) & 0xf;
da6b5335 1812 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1813 return 1;
1814 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1815 tmp = tcg_temp_new_i32();
18c9b560
AZ
1816 switch ((insn >> 22) & 3) {
1817 case 0:
da6b5335
FN
1818 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1819 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1820 if (insn & 8) {
1821 tcg_gen_ext8s_i32(tmp, tmp);
1822 } else {
1823 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1824 }
1825 break;
1826 case 1:
da6b5335
FN
1827 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1828 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1829 if (insn & 8) {
1830 tcg_gen_ext16s_i32(tmp, tmp);
1831 } else {
1832 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1833 }
1834 break;
1835 case 2:
da6b5335
FN
1836 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1837 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1838 break;
18c9b560 1839 }
da6b5335 1840 store_reg(s, rd, tmp);
18c9b560
AZ
1841 break;
1842 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1843 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1844 return 1;
da6b5335 1845 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1846 switch ((insn >> 22) & 3) {
1847 case 0:
da6b5335 1848 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1849 break;
1850 case 1:
da6b5335 1851 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1852 break;
1853 case 2:
da6b5335 1854 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1855 break;
18c9b560 1856 }
da6b5335
FN
1857 tcg_gen_shli_i32(tmp, tmp, 28);
1858 gen_set_nzcv(tmp);
7d1b0095 1859 tcg_temp_free_i32(tmp);
18c9b560
AZ
1860 break;
1861 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1862 if (((insn >> 6) & 3) == 3)
1863 return 1;
18c9b560
AZ
1864 rd = (insn >> 12) & 0xf;
1865 wrd = (insn >> 16) & 0xf;
da6b5335 1866 tmp = load_reg(s, rd);
18c9b560
AZ
1867 switch ((insn >> 6) & 3) {
1868 case 0:
da6b5335 1869 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1870 break;
1871 case 1:
da6b5335 1872 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1873 break;
1874 case 2:
da6b5335 1875 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1876 break;
18c9b560 1877 }
7d1b0095 1878 tcg_temp_free_i32(tmp);
18c9b560
AZ
1879 gen_op_iwmmxt_movq_wRn_M0(wrd);
1880 gen_op_iwmmxt_set_mup();
1881 break;
1882 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1883 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1884 return 1;
da6b5335 1885 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1886 tmp2 = tcg_temp_new_i32();
da6b5335 1887 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1888 switch ((insn >> 22) & 3) {
1889 case 0:
1890 for (i = 0; i < 7; i ++) {
da6b5335
FN
1891 tcg_gen_shli_i32(tmp2, tmp2, 4);
1892 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1893 }
1894 break;
1895 case 1:
1896 for (i = 0; i < 3; i ++) {
da6b5335
FN
1897 tcg_gen_shli_i32(tmp2, tmp2, 8);
1898 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1899 }
1900 break;
1901 case 2:
da6b5335
FN
1902 tcg_gen_shli_i32(tmp2, tmp2, 16);
1903 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1904 break;
18c9b560 1905 }
da6b5335 1906 gen_set_nzcv(tmp);
7d1b0095
PM
1907 tcg_temp_free_i32(tmp2);
1908 tcg_temp_free_i32(tmp);
18c9b560
AZ
1909 break;
1910 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1911 wrd = (insn >> 12) & 0xf;
1912 rd0 = (insn >> 16) & 0xf;
1913 gen_op_iwmmxt_movq_M0_wRn(rd0);
1914 switch ((insn >> 22) & 3) {
1915 case 0:
e677137d 1916 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1917 break;
1918 case 1:
e677137d 1919 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1920 break;
1921 case 2:
e677137d 1922 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1923 break;
1924 case 3:
1925 return 1;
1926 }
1927 gen_op_iwmmxt_movq_wRn_M0(wrd);
1928 gen_op_iwmmxt_set_mup();
1929 break;
1930 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1931 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1932 return 1;
da6b5335 1933 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1934 tmp2 = tcg_temp_new_i32();
da6b5335 1935 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1936 switch ((insn >> 22) & 3) {
1937 case 0:
1938 for (i = 0; i < 7; i ++) {
da6b5335
FN
1939 tcg_gen_shli_i32(tmp2, tmp2, 4);
1940 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1941 }
1942 break;
1943 case 1:
1944 for (i = 0; i < 3; i ++) {
da6b5335
FN
1945 tcg_gen_shli_i32(tmp2, tmp2, 8);
1946 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1947 }
1948 break;
1949 case 2:
da6b5335
FN
1950 tcg_gen_shli_i32(tmp2, tmp2, 16);
1951 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1952 break;
18c9b560 1953 }
da6b5335 1954 gen_set_nzcv(tmp);
7d1b0095
PM
1955 tcg_temp_free_i32(tmp2);
1956 tcg_temp_free_i32(tmp);
18c9b560
AZ
1957 break;
1958 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1959 rd = (insn >> 12) & 0xf;
1960 rd0 = (insn >> 16) & 0xf;
da6b5335 1961 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1962 return 1;
1963 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1964 tmp = tcg_temp_new_i32();
18c9b560
AZ
1965 switch ((insn >> 22) & 3) {
1966 case 0:
da6b5335 1967 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1968 break;
1969 case 1:
da6b5335 1970 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1971 break;
1972 case 2:
da6b5335 1973 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1974 break;
18c9b560 1975 }
da6b5335 1976 store_reg(s, rd, tmp);
18c9b560
AZ
1977 break;
1978 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1979 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1980 wrd = (insn >> 12) & 0xf;
1981 rd0 = (insn >> 16) & 0xf;
1982 rd1 = (insn >> 0) & 0xf;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0);
1984 switch ((insn >> 22) & 3) {
1985 case 0:
1986 if (insn & (1 << 21))
1987 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1988 else
1989 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1990 break;
1991 case 1:
1992 if (insn & (1 << 21))
1993 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1994 else
1995 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1996 break;
1997 case 2:
1998 if (insn & (1 << 21))
1999 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2000 else
2001 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2002 break;
2003 case 3:
2004 return 1;
2005 }
2006 gen_op_iwmmxt_movq_wRn_M0(wrd);
2007 gen_op_iwmmxt_set_mup();
2008 gen_op_iwmmxt_set_cup();
2009 break;
2010 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2011 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2012 wrd = (insn >> 12) & 0xf;
2013 rd0 = (insn >> 16) & 0xf;
2014 gen_op_iwmmxt_movq_M0_wRn(rd0);
2015 switch ((insn >> 22) & 3) {
2016 case 0:
2017 if (insn & (1 << 21))
2018 gen_op_iwmmxt_unpacklsb_M0();
2019 else
2020 gen_op_iwmmxt_unpacklub_M0();
2021 break;
2022 case 1:
2023 if (insn & (1 << 21))
2024 gen_op_iwmmxt_unpacklsw_M0();
2025 else
2026 gen_op_iwmmxt_unpackluw_M0();
2027 break;
2028 case 2:
2029 if (insn & (1 << 21))
2030 gen_op_iwmmxt_unpacklsl_M0();
2031 else
2032 gen_op_iwmmxt_unpacklul_M0();
2033 break;
2034 case 3:
2035 return 1;
2036 }
2037 gen_op_iwmmxt_movq_wRn_M0(wrd);
2038 gen_op_iwmmxt_set_mup();
2039 gen_op_iwmmxt_set_cup();
2040 break;
2041 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2042 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2043 wrd = (insn >> 12) & 0xf;
2044 rd0 = (insn >> 16) & 0xf;
2045 gen_op_iwmmxt_movq_M0_wRn(rd0);
2046 switch ((insn >> 22) & 3) {
2047 case 0:
2048 if (insn & (1 << 21))
2049 gen_op_iwmmxt_unpackhsb_M0();
2050 else
2051 gen_op_iwmmxt_unpackhub_M0();
2052 break;
2053 case 1:
2054 if (insn & (1 << 21))
2055 gen_op_iwmmxt_unpackhsw_M0();
2056 else
2057 gen_op_iwmmxt_unpackhuw_M0();
2058 break;
2059 case 2:
2060 if (insn & (1 << 21))
2061 gen_op_iwmmxt_unpackhsl_M0();
2062 else
2063 gen_op_iwmmxt_unpackhul_M0();
2064 break;
2065 case 3:
2066 return 1;
2067 }
2068 gen_op_iwmmxt_movq_wRn_M0(wrd);
2069 gen_op_iwmmxt_set_mup();
2070 gen_op_iwmmxt_set_cup();
2071 break;
2072 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2073 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2074 if (((insn >> 22) & 3) == 0)
2075 return 1;
18c9b560
AZ
2076 wrd = (insn >> 12) & 0xf;
2077 rd0 = (insn >> 16) & 0xf;
2078 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2079 tmp = tcg_temp_new_i32();
da6b5335 2080 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2081 tcg_temp_free_i32(tmp);
18c9b560 2082 return 1;
da6b5335 2083 }
18c9b560 2084 switch ((insn >> 22) & 3) {
18c9b560 2085 case 1:
477955bd 2086 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2087 break;
2088 case 2:
477955bd 2089 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2090 break;
2091 case 3:
477955bd 2092 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2093 break;
2094 }
7d1b0095 2095 tcg_temp_free_i32(tmp);
18c9b560
AZ
2096 gen_op_iwmmxt_movq_wRn_M0(wrd);
2097 gen_op_iwmmxt_set_mup();
2098 gen_op_iwmmxt_set_cup();
2099 break;
2100 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2101 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2102 if (((insn >> 22) & 3) == 0)
2103 return 1;
18c9b560
AZ
2104 wrd = (insn >> 12) & 0xf;
2105 rd0 = (insn >> 16) & 0xf;
2106 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2107 tmp = tcg_temp_new_i32();
da6b5335 2108 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2109 tcg_temp_free_i32(tmp);
18c9b560 2110 return 1;
da6b5335 2111 }
18c9b560 2112 switch ((insn >> 22) & 3) {
18c9b560 2113 case 1:
477955bd 2114 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2115 break;
2116 case 2:
477955bd 2117 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2118 break;
2119 case 3:
477955bd 2120 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2121 break;
2122 }
7d1b0095 2123 tcg_temp_free_i32(tmp);
18c9b560
AZ
2124 gen_op_iwmmxt_movq_wRn_M0(wrd);
2125 gen_op_iwmmxt_set_mup();
2126 gen_op_iwmmxt_set_cup();
2127 break;
2128 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2129 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2130 if (((insn >> 22) & 3) == 0)
2131 return 1;
18c9b560
AZ
2132 wrd = (insn >> 12) & 0xf;
2133 rd0 = (insn >> 16) & 0xf;
2134 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2135 tmp = tcg_temp_new_i32();
da6b5335 2136 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2137 tcg_temp_free_i32(tmp);
18c9b560 2138 return 1;
da6b5335 2139 }
18c9b560 2140 switch ((insn >> 22) & 3) {
18c9b560 2141 case 1:
477955bd 2142 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2143 break;
2144 case 2:
477955bd 2145 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2146 break;
2147 case 3:
477955bd 2148 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2149 break;
2150 }
7d1b0095 2151 tcg_temp_free_i32(tmp);
18c9b560
AZ
2152 gen_op_iwmmxt_movq_wRn_M0(wrd);
2153 gen_op_iwmmxt_set_mup();
2154 gen_op_iwmmxt_set_cup();
2155 break;
2156 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2157 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2158 if (((insn >> 22) & 3) == 0)
2159 return 1;
18c9b560
AZ
2160 wrd = (insn >> 12) & 0xf;
2161 rd0 = (insn >> 16) & 0xf;
2162 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2163 tmp = tcg_temp_new_i32();
18c9b560 2164 switch ((insn >> 22) & 3) {
18c9b560 2165 case 1:
da6b5335 2166 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2167 tcg_temp_free_i32(tmp);
18c9b560 2168 return 1;
da6b5335 2169 }
477955bd 2170 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2171 break;
2172 case 2:
da6b5335 2173 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2174 tcg_temp_free_i32(tmp);
18c9b560 2175 return 1;
da6b5335 2176 }
477955bd 2177 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2178 break;
2179 case 3:
da6b5335 2180 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2181 tcg_temp_free_i32(tmp);
18c9b560 2182 return 1;
da6b5335 2183 }
477955bd 2184 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2185 break;
2186 }
7d1b0095 2187 tcg_temp_free_i32(tmp);
18c9b560
AZ
2188 gen_op_iwmmxt_movq_wRn_M0(wrd);
2189 gen_op_iwmmxt_set_mup();
2190 gen_op_iwmmxt_set_cup();
2191 break;
2192 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2193 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2194 wrd = (insn >> 12) & 0xf;
2195 rd0 = (insn >> 16) & 0xf;
2196 rd1 = (insn >> 0) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
2198 switch ((insn >> 22) & 3) {
2199 case 0:
2200 if (insn & (1 << 21))
2201 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2202 else
2203 gen_op_iwmmxt_minub_M0_wRn(rd1);
2204 break;
2205 case 1:
2206 if (insn & (1 << 21))
2207 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2208 else
2209 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2210 break;
2211 case 2:
2212 if (insn & (1 << 21))
2213 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2214 else
2215 gen_op_iwmmxt_minul_M0_wRn(rd1);
2216 break;
2217 case 3:
2218 return 1;
2219 }
2220 gen_op_iwmmxt_movq_wRn_M0(wrd);
2221 gen_op_iwmmxt_set_mup();
2222 break;
2223 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2224 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2225 wrd = (insn >> 12) & 0xf;
2226 rd0 = (insn >> 16) & 0xf;
2227 rd1 = (insn >> 0) & 0xf;
2228 gen_op_iwmmxt_movq_M0_wRn(rd0);
2229 switch ((insn >> 22) & 3) {
2230 case 0:
2231 if (insn & (1 << 21))
2232 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2233 else
2234 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2235 break;
2236 case 1:
2237 if (insn & (1 << 21))
2238 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2239 else
2240 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2241 break;
2242 case 2:
2243 if (insn & (1 << 21))
2244 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2245 else
2246 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2247 break;
2248 case 3:
2249 return 1;
2250 }
2251 gen_op_iwmmxt_movq_wRn_M0(wrd);
2252 gen_op_iwmmxt_set_mup();
2253 break;
2254 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2255 case 0x402: case 0x502: case 0x602: case 0x702:
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 rd1 = (insn >> 0) & 0xf;
2259 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2260 tmp = tcg_const_i32((insn >> 20) & 3);
2261 iwmmxt_load_reg(cpu_V1, rd1);
2262 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2263 tcg_temp_free(tmp);
18c9b560
AZ
2264 gen_op_iwmmxt_movq_wRn_M0(wrd);
2265 gen_op_iwmmxt_set_mup();
2266 break;
2267 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2268 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2269 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2270 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2271 wrd = (insn >> 12) & 0xf;
2272 rd0 = (insn >> 16) & 0xf;
2273 rd1 = (insn >> 0) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0);
2275 switch ((insn >> 20) & 0xf) {
2276 case 0x0:
2277 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2278 break;
2279 case 0x1:
2280 gen_op_iwmmxt_subub_M0_wRn(rd1);
2281 break;
2282 case 0x3:
2283 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2284 break;
2285 case 0x4:
2286 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2287 break;
2288 case 0x5:
2289 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2290 break;
2291 case 0x7:
2292 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2293 break;
2294 case 0x8:
2295 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2296 break;
2297 case 0x9:
2298 gen_op_iwmmxt_subul_M0_wRn(rd1);
2299 break;
2300 case 0xb:
2301 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2302 break;
2303 default:
2304 return 1;
2305 }
2306 gen_op_iwmmxt_movq_wRn_M0(wrd);
2307 gen_op_iwmmxt_set_mup();
2308 gen_op_iwmmxt_set_cup();
2309 break;
2310 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2311 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2312 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2313 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2314 wrd = (insn >> 12) & 0xf;
2315 rd0 = (insn >> 16) & 0xf;
2316 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2317 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2318 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2319 tcg_temp_free(tmp);
18c9b560
AZ
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 switch ((insn >> 20) & 0xf) {
2333 case 0x0:
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2335 break;
2336 case 0x1:
2337 gen_op_iwmmxt_addub_M0_wRn(rd1);
2338 break;
2339 case 0x3:
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2341 break;
2342 case 0x4:
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2344 break;
2345 case 0x5:
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2347 break;
2348 case 0x7:
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2350 break;
2351 case 0x8:
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2353 break;
2354 case 0x9:
2355 gen_op_iwmmxt_addul_M0_wRn(rd1);
2356 break;
2357 case 0xb:
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2359 break;
2360 default:
2361 return 1;
2362 }
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2371 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2372 return 1;
18c9b560
AZ
2373 wrd = (insn >> 12) & 0xf;
2374 rd0 = (insn >> 16) & 0xf;
2375 rd1 = (insn >> 0) & 0xf;
2376 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2377 switch ((insn >> 22) & 3) {
18c9b560
AZ
2378 case 1:
2379 if (insn & (1 << 21))
2380 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2381 else
2382 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2383 break;
2384 case 2:
2385 if (insn & (1 << 21))
2386 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2387 else
2388 gen_op_iwmmxt_packul_M0_wRn(rd1);
2389 break;
2390 case 3:
2391 if (insn & (1 << 21))
2392 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2393 else
2394 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2395 break;
2396 }
2397 gen_op_iwmmxt_movq_wRn_M0(wrd);
2398 gen_op_iwmmxt_set_mup();
2399 gen_op_iwmmxt_set_cup();
2400 break;
2401 case 0x201: case 0x203: case 0x205: case 0x207:
2402 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2403 case 0x211: case 0x213: case 0x215: case 0x217:
2404 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2405 wrd = (insn >> 5) & 0xf;
2406 rd0 = (insn >> 12) & 0xf;
2407 rd1 = (insn >> 0) & 0xf;
2408 if (rd0 == 0xf || rd1 == 0xf)
2409 return 1;
2410 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2411 tmp = load_reg(s, rd0);
2412 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2413 switch ((insn >> 16) & 0xf) {
2414 case 0x0: /* TMIA */
da6b5335 2415 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2416 break;
2417 case 0x8: /* TMIAPH */
da6b5335 2418 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2419 break;
2420 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2421 if (insn & (1 << 16))
da6b5335 2422 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2423 if (insn & (1 << 17))
da6b5335
FN
2424 tcg_gen_shri_i32(tmp2, tmp2, 16);
2425 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2426 break;
2427 default:
7d1b0095
PM
2428 tcg_temp_free_i32(tmp2);
2429 tcg_temp_free_i32(tmp);
18c9b560
AZ
2430 return 1;
2431 }
7d1b0095
PM
2432 tcg_temp_free_i32(tmp2);
2433 tcg_temp_free_i32(tmp);
18c9b560
AZ
2434 gen_op_iwmmxt_movq_wRn_M0(wrd);
2435 gen_op_iwmmxt_set_mup();
2436 break;
2437 default:
2438 return 1;
2439 }
2440
2441 return 0;
2442}
2443
a1c7273b 2444/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2445 (ie. an undefined instruction). */
0ecb72a5 2446static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2447{
2448 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2449 TCGv tmp, tmp2;
18c9b560
AZ
2450
2451 if ((insn & 0x0ff00f10) == 0x0e200010) {
2452 /* Multiply with Internal Accumulate Format */
2453 rd0 = (insn >> 12) & 0xf;
2454 rd1 = insn & 0xf;
2455 acc = (insn >> 5) & 7;
2456
2457 if (acc != 0)
2458 return 1;
2459
3a554c0f
FN
2460 tmp = load_reg(s, rd0);
2461 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2462 switch ((insn >> 16) & 0xf) {
2463 case 0x0: /* MIA */
3a554c0f 2464 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2465 break;
2466 case 0x8: /* MIAPH */
3a554c0f 2467 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2468 break;
2469 case 0xc: /* MIABB */
2470 case 0xd: /* MIABT */
2471 case 0xe: /* MIATB */
2472 case 0xf: /* MIATT */
18c9b560 2473 if (insn & (1 << 16))
3a554c0f 2474 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2475 if (insn & (1 << 17))
3a554c0f
FN
2476 tcg_gen_shri_i32(tmp2, tmp2, 16);
2477 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2478 break;
2479 default:
2480 return 1;
2481 }
7d1b0095
PM
2482 tcg_temp_free_i32(tmp2);
2483 tcg_temp_free_i32(tmp);
18c9b560
AZ
2484
2485 gen_op_iwmmxt_movq_wRn_M0(acc);
2486 return 0;
2487 }
2488
2489 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2490 /* Internal Accumulator Access Format */
2491 rdhi = (insn >> 16) & 0xf;
2492 rdlo = (insn >> 12) & 0xf;
2493 acc = insn & 7;
2494
2495 if (acc != 0)
2496 return 1;
2497
2498 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2499 iwmmxt_load_reg(cpu_V0, acc);
2500 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2501 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2502 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2503 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2504 } else { /* MAR */
3a554c0f
FN
2505 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2506 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2507 }
2508 return 0;
2509 }
2510
2511 return 1;
2512}
2513
9ee6e8bb
PB
/* Shift x right by n, treating a negative n as a left shift by -n.
   Used so that register-field extraction works for both bit positions.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a VFP single-precision register number: a 4-bit field at
   'bigbit' supplies the upper bits and a single bit at 'smallbit'
   supplies the LSB.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a VFP double-precision register number into 'reg'.  On VFP3
   the 'smallbit' becomes bit 4 of the register number (D16-D31);
   pre-VFP3 that bit must be zero or the insn UNDEFs.
   NOTE: this macro references the enclosing function's 'env' and may
   execute 'return 1', so it is only usable inside the disas_* helpers.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the destination (D), first source (N) and second
   source (M) register encodings of a VFP instruction.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2533
4373f3ce
PB
2534/* Move between integer and VFP cores. */
2535static TCGv gen_vfp_mrs(void)
2536{
7d1b0095 2537 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2538 tcg_gen_mov_i32(tmp, cpu_F0s);
2539 return tmp;
2540}
2541
2542static void gen_vfp_msr(TCGv tmp)
2543{
2544 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2545 tcg_temp_free_i32(tmp);
4373f3ce
PB
2546}
2547
ad69471c
PB
2548static void gen_neon_dup_u8(TCGv var, int shift)
2549{
7d1b0095 2550 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2551 if (shift)
2552 tcg_gen_shri_i32(var, var, shift);
86831435 2553 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2554 tcg_gen_shli_i32(tmp, var, 8);
2555 tcg_gen_or_i32(var, var, tmp);
2556 tcg_gen_shli_i32(tmp, var, 16);
2557 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2558 tcg_temp_free_i32(tmp);
ad69471c
PB
2559}
2560
2561static void gen_neon_dup_low16(TCGv var)
2562{
7d1b0095 2563 TCGv tmp = tcg_temp_new_i32();
86831435 2564 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2565 tcg_gen_shli_i32(tmp, var, 16);
2566 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2567 tcg_temp_free_i32(tmp);
ad69471c
PB
2568}
2569
2570static void gen_neon_dup_high16(TCGv var)
2571{
7d1b0095 2572 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2573 tcg_gen_andi_i32(var, var, 0xffff0000);
2574 tcg_gen_shri_i32(tmp, var, 16);
2575 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2576 tcg_temp_free_i32(tmp);
ad69471c
PB
2577}
2578
8e18cde3
PM
2579static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2580{
2581 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2582 TCGv tmp;
2583 switch (size) {
2584 case 0:
2585 tmp = gen_ld8u(addr, IS_USER(s));
2586 gen_neon_dup_u8(tmp, 0);
2587 break;
2588 case 1:
2589 tmp = gen_ld16u(addr, IS_USER(s));
2590 gen_neon_dup_low16(tmp);
2591 break;
2592 case 2:
2593 tmp = gen_ld32(addr, IS_USER(s));
2594 break;
2595 default: /* Avoid compiler warnings. */
2596 abort();
2597 }
2598 return tmp;
2599}
2600
a1c7273b 2601/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2602 (ie. an undefined instruction). */
0ecb72a5 2603static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2604{
2605 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2606 int dp, veclen;
312eea9f 2607 TCGv addr;
4373f3ce 2608 TCGv tmp;
ad69471c 2609 TCGv tmp2;
b7bcbe95 2610
40f137e1
PB
2611 if (!arm_feature(env, ARM_FEATURE_VFP))
2612 return 1;
2613
5df8bac1 2614 if (!s->vfp_enabled) {
9ee6e8bb 2615 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2616 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2617 return 1;
2618 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2619 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2620 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2621 return 1;
2622 }
b7bcbe95
FB
2623 dp = ((insn & 0xf00) == 0xb00);
2624 switch ((insn >> 24) & 0xf) {
2625 case 0xe:
2626 if (insn & (1 << 4)) {
2627 /* single register transfer */
b7bcbe95
FB
2628 rd = (insn >> 12) & 0xf;
2629 if (dp) {
9ee6e8bb
PB
2630 int size;
2631 int pass;
2632
2633 VFP_DREG_N(rn, insn);
2634 if (insn & 0xf)
b7bcbe95 2635 return 1;
9ee6e8bb
PB
2636 if (insn & 0x00c00060
2637 && !arm_feature(env, ARM_FEATURE_NEON))
2638 return 1;
2639
2640 pass = (insn >> 21) & 1;
2641 if (insn & (1 << 22)) {
2642 size = 0;
2643 offset = ((insn >> 5) & 3) * 8;
2644 } else if (insn & (1 << 5)) {
2645 size = 1;
2646 offset = (insn & (1 << 6)) ? 16 : 0;
2647 } else {
2648 size = 2;
2649 offset = 0;
2650 }
18c9b560 2651 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2652 /* vfp->arm */
ad69471c 2653 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2654 switch (size) {
2655 case 0:
9ee6e8bb 2656 if (offset)
ad69471c 2657 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2658 if (insn & (1 << 23))
ad69471c 2659 gen_uxtb(tmp);
9ee6e8bb 2660 else
ad69471c 2661 gen_sxtb(tmp);
9ee6e8bb
PB
2662 break;
2663 case 1:
9ee6e8bb
PB
2664 if (insn & (1 << 23)) {
2665 if (offset) {
ad69471c 2666 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2667 } else {
ad69471c 2668 gen_uxth(tmp);
9ee6e8bb
PB
2669 }
2670 } else {
2671 if (offset) {
ad69471c 2672 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2673 } else {
ad69471c 2674 gen_sxth(tmp);
9ee6e8bb
PB
2675 }
2676 }
2677 break;
2678 case 2:
9ee6e8bb
PB
2679 break;
2680 }
ad69471c 2681 store_reg(s, rd, tmp);
b7bcbe95
FB
2682 } else {
2683 /* arm->vfp */
ad69471c 2684 tmp = load_reg(s, rd);
9ee6e8bb
PB
2685 if (insn & (1 << 23)) {
2686 /* VDUP */
2687 if (size == 0) {
ad69471c 2688 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2689 } else if (size == 1) {
ad69471c 2690 gen_neon_dup_low16(tmp);
9ee6e8bb 2691 }
cbbccffc 2692 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2693 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2694 tcg_gen_mov_i32(tmp2, tmp);
2695 neon_store_reg(rn, n, tmp2);
2696 }
2697 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2698 } else {
2699 /* VMOV */
2700 switch (size) {
2701 case 0:
ad69471c 2702 tmp2 = neon_load_reg(rn, pass);
d593c48e 2703 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2704 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2705 break;
2706 case 1:
ad69471c 2707 tmp2 = neon_load_reg(rn, pass);
d593c48e 2708 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2709 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2710 break;
2711 case 2:
9ee6e8bb
PB
2712 break;
2713 }
ad69471c 2714 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2715 }
b7bcbe95 2716 }
9ee6e8bb
PB
2717 } else { /* !dp */
2718 if ((insn & 0x6f) != 0x00)
2719 return 1;
2720 rn = VFP_SREG_N(insn);
18c9b560 2721 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2722 /* vfp->arm */
2723 if (insn & (1 << 21)) {
2724 /* system register */
40f137e1 2725 rn >>= 1;
9ee6e8bb 2726
b7bcbe95 2727 switch (rn) {
40f137e1 2728 case ARM_VFP_FPSID:
4373f3ce 2729 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2730 VFP3 restricts all id registers to privileged
2731 accesses. */
2732 if (IS_USER(s)
2733 && arm_feature(env, ARM_FEATURE_VFP3))
2734 return 1;
4373f3ce 2735 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2736 break;
40f137e1 2737 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2738 if (IS_USER(s))
2739 return 1;
4373f3ce 2740 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2741 break;
40f137e1
PB
2742 case ARM_VFP_FPINST:
2743 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2744 /* Not present in VFP3. */
2745 if (IS_USER(s)
2746 || arm_feature(env, ARM_FEATURE_VFP3))
2747 return 1;
4373f3ce 2748 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2749 break;
40f137e1 2750 case ARM_VFP_FPSCR:
601d70b9 2751 if (rd == 15) {
4373f3ce
PB
2752 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2753 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2754 } else {
7d1b0095 2755 tmp = tcg_temp_new_i32();
4373f3ce
PB
2756 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2757 }
b7bcbe95 2758 break;
9ee6e8bb
PB
2759 case ARM_VFP_MVFR0:
2760 case ARM_VFP_MVFR1:
2761 if (IS_USER(s)
06ed5d66 2762 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2763 return 1;
4373f3ce 2764 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2765 break;
b7bcbe95
FB
2766 default:
2767 return 1;
2768 }
2769 } else {
2770 gen_mov_F0_vreg(0, rn);
4373f3ce 2771 tmp = gen_vfp_mrs();
b7bcbe95
FB
2772 }
2773 if (rd == 15) {
b5ff1b31 2774 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2775 gen_set_nzcv(tmp);
7d1b0095 2776 tcg_temp_free_i32(tmp);
4373f3ce
PB
2777 } else {
2778 store_reg(s, rd, tmp);
2779 }
b7bcbe95
FB
2780 } else {
2781 /* arm->vfp */
b7bcbe95 2782 if (insn & (1 << 21)) {
40f137e1 2783 rn >>= 1;
b7bcbe95
FB
2784 /* system register */
2785 switch (rn) {
40f137e1 2786 case ARM_VFP_FPSID:
9ee6e8bb
PB
2787 case ARM_VFP_MVFR0:
2788 case ARM_VFP_MVFR1:
b7bcbe95
FB
2789 /* Writes are ignored. */
2790 break;
40f137e1 2791 case ARM_VFP_FPSCR:
e4c1cfa5 2792 tmp = load_reg(s, rd);
4373f3ce 2793 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2794 tcg_temp_free_i32(tmp);
b5ff1b31 2795 gen_lookup_tb(s);
b7bcbe95 2796 break;
40f137e1 2797 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2798 if (IS_USER(s))
2799 return 1;
71b3c3de
JR
2800 /* TODO: VFP subarchitecture support.
2801 * For now, keep the EN bit only */
e4c1cfa5 2802 tmp = load_reg(s, rd);
71b3c3de 2803 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2804 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2805 gen_lookup_tb(s);
2806 break;
2807 case ARM_VFP_FPINST:
2808 case ARM_VFP_FPINST2:
e4c1cfa5 2809 tmp = load_reg(s, rd);
4373f3ce 2810 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2811 break;
b7bcbe95
FB
2812 default:
2813 return 1;
2814 }
2815 } else {
e4c1cfa5 2816 tmp = load_reg(s, rd);
4373f3ce 2817 gen_vfp_msr(tmp);
b7bcbe95
FB
2818 gen_mov_vreg_F0(0, rn);
2819 }
2820 }
2821 }
2822 } else {
2823 /* data processing */
2824 /* The opcode is in bits 23, 21, 20 and 6. */
2825 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2826 if (dp) {
2827 if (op == 15) {
2828 /* rn is opcode */
2829 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2830 } else {
2831 /* rn is register number */
9ee6e8bb 2832 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2833 }
2834
04595bf6 2835 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2836 /* Integer or single precision destination. */
9ee6e8bb 2837 rd = VFP_SREG_D(insn);
b7bcbe95 2838 } else {
9ee6e8bb 2839 VFP_DREG_D(rd, insn);
b7bcbe95 2840 }
04595bf6
PM
2841 if (op == 15 &&
2842 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2843 /* VCVT from int is always from S reg regardless of dp bit.
2844 * VCVT with immediate frac_bits has same format as SREG_M
2845 */
2846 rm = VFP_SREG_M(insn);
b7bcbe95 2847 } else {
9ee6e8bb 2848 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2849 }
2850 } else {
9ee6e8bb 2851 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2852 if (op == 15 && rn == 15) {
2853 /* Double precision destination. */
9ee6e8bb
PB
2854 VFP_DREG_D(rd, insn);
2855 } else {
2856 rd = VFP_SREG_D(insn);
2857 }
04595bf6
PM
2858 /* NB that we implicitly rely on the encoding for the frac_bits
2859 * in VCVT of fixed to float being the same as that of an SREG_M
2860 */
9ee6e8bb 2861 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2862 }
2863
69d1fc22 2864 veclen = s->vec_len;
b7bcbe95
FB
2865 if (op == 15 && rn > 3)
2866 veclen = 0;
2867
2868 /* Shut up compiler warnings. */
2869 delta_m = 0;
2870 delta_d = 0;
2871 bank_mask = 0;
3b46e624 2872
b7bcbe95
FB
2873 if (veclen > 0) {
2874 if (dp)
2875 bank_mask = 0xc;
2876 else
2877 bank_mask = 0x18;
2878
2879 /* Figure out what type of vector operation this is. */
2880 if ((rd & bank_mask) == 0) {
2881 /* scalar */
2882 veclen = 0;
2883 } else {
2884 if (dp)
69d1fc22 2885 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2886 else
69d1fc22 2887 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2888
2889 if ((rm & bank_mask) == 0) {
2890 /* mixed scalar/vector */
2891 delta_m = 0;
2892 } else {
2893 /* vector */
2894 delta_m = delta_d;
2895 }
2896 }
2897 }
2898
2899 /* Load the initial operands. */
2900 if (op == 15) {
2901 switch (rn) {
2902 case 16:
2903 case 17:
2904 /* Integer source */
2905 gen_mov_F0_vreg(0, rm);
2906 break;
2907 case 8:
2908 case 9:
2909 /* Compare */
2910 gen_mov_F0_vreg(dp, rd);
2911 gen_mov_F1_vreg(dp, rm);
2912 break;
2913 case 10:
2914 case 11:
2915 /* Compare with zero */
2916 gen_mov_F0_vreg(dp, rd);
2917 gen_vfp_F1_ld0(dp);
2918 break;
9ee6e8bb
PB
2919 case 20:
2920 case 21:
2921 case 22:
2922 case 23:
644ad806
PB
2923 case 28:
2924 case 29:
2925 case 30:
2926 case 31:
9ee6e8bb
PB
2927 /* Source and destination the same. */
2928 gen_mov_F0_vreg(dp, rd);
2929 break;
6e0c0ed1
PM
2930 case 4:
2931 case 5:
2932 case 6:
2933 case 7:
2934 /* VCVTB, VCVTT: only present with the halfprec extension,
2935 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2936 */
2937 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2938 return 1;
2939 }
2940 /* Otherwise fall through */
b7bcbe95
FB
2941 default:
2942 /* One source operand. */
2943 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2944 break;
b7bcbe95
FB
2945 }
2946 } else {
2947 /* Two source operands. */
2948 gen_mov_F0_vreg(dp, rn);
2949 gen_mov_F1_vreg(dp, rm);
2950 }
2951
2952 for (;;) {
2953 /* Perform the calculation. */
2954 switch (op) {
605a6aed
PM
2955 case 0: /* VMLA: fd + (fn * fm) */
2956 /* Note that order of inputs to the add matters for NaNs */
2957 gen_vfp_F1_mul(dp);
2958 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2959 gen_vfp_add(dp);
2960 break;
605a6aed 2961 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2962 gen_vfp_mul(dp);
605a6aed
PM
2963 gen_vfp_F1_neg(dp);
2964 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2965 gen_vfp_add(dp);
2966 break;
605a6aed
PM
2967 case 2: /* VNMLS: -fd + (fn * fm) */
2968 /* Note that it isn't valid to replace (-A + B) with (B - A)
2969 * or similar plausible looking simplifications
2970 * because this will give wrong results for NaNs.
2971 */
2972 gen_vfp_F1_mul(dp);
2973 gen_mov_F0_vreg(dp, rd);
2974 gen_vfp_neg(dp);
2975 gen_vfp_add(dp);
b7bcbe95 2976 break;
605a6aed 2977 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2978 gen_vfp_mul(dp);
605a6aed
PM
2979 gen_vfp_F1_neg(dp);
2980 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2981 gen_vfp_neg(dp);
605a6aed 2982 gen_vfp_add(dp);
b7bcbe95
FB
2983 break;
2984 case 4: /* mul: fn * fm */
2985 gen_vfp_mul(dp);
2986 break;
2987 case 5: /* nmul: -(fn * fm) */
2988 gen_vfp_mul(dp);
2989 gen_vfp_neg(dp);
2990 break;
2991 case 6: /* add: fn + fm */
2992 gen_vfp_add(dp);
2993 break;
2994 case 7: /* sub: fn - fm */
2995 gen_vfp_sub(dp);
2996 break;
2997 case 8: /* div: fn / fm */
2998 gen_vfp_div(dp);
2999 break;
da97f52c
PM
3000 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3001 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3002 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3003 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3004 /* These are fused multiply-add, and must be done as one
3005 * floating point operation with no rounding between the
3006 * multiplication and addition steps.
3007 * NB that doing the negations here as separate steps is
3008 * correct : an input NaN should come out with its sign bit
3009 * flipped if it is a negated-input.
3010 */
3011 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3012 return 1;
3013 }
3014 if (dp) {
3015 TCGv_ptr fpst;
3016 TCGv_i64 frd;
3017 if (op & 1) {
3018 /* VFNMS, VFMS */
3019 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3020 }
3021 frd = tcg_temp_new_i64();
3022 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3023 if (op & 2) {
3024 /* VFNMA, VFNMS */
3025 gen_helper_vfp_negd(frd, frd);
3026 }
3027 fpst = get_fpstatus_ptr(0);
3028 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3029 cpu_F1d, frd, fpst);
3030 tcg_temp_free_ptr(fpst);
3031 tcg_temp_free_i64(frd);
3032 } else {
3033 TCGv_ptr fpst;
3034 TCGv_i32 frd;
3035 if (op & 1) {
3036 /* VFNMS, VFMS */
3037 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3038 }
3039 frd = tcg_temp_new_i32();
3040 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3041 if (op & 2) {
3042 gen_helper_vfp_negs(frd, frd);
3043 }
3044 fpst = get_fpstatus_ptr(0);
3045 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3046 cpu_F1s, frd, fpst);
3047 tcg_temp_free_ptr(fpst);
3048 tcg_temp_free_i32(frd);
3049 }
3050 break;
9ee6e8bb
PB
3051 case 14: /* fconst */
3052 if (!arm_feature(env, ARM_FEATURE_VFP3))
3053 return 1;
3054
3055 n = (insn << 12) & 0x80000000;
3056 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3057 if (dp) {
3058 if (i & 0x40)
3059 i |= 0x3f80;
3060 else
3061 i |= 0x4000;
3062 n |= i << 16;
4373f3ce 3063 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3064 } else {
3065 if (i & 0x40)
3066 i |= 0x780;
3067 else
3068 i |= 0x800;
3069 n |= i << 19;
5b340b51 3070 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3071 }
9ee6e8bb 3072 break;
b7bcbe95
FB
3073 case 15: /* extension space */
3074 switch (rn) {
3075 case 0: /* cpy */
3076 /* no-op */
3077 break;
3078 case 1: /* abs */
3079 gen_vfp_abs(dp);
3080 break;
3081 case 2: /* neg */
3082 gen_vfp_neg(dp);
3083 break;
3084 case 3: /* sqrt */
3085 gen_vfp_sqrt(dp);
3086 break;
60011498 3087 case 4: /* vcvtb.f32.f16 */
60011498
PB
3088 tmp = gen_vfp_mrs();
3089 tcg_gen_ext16u_i32(tmp, tmp);
3090 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3091 tcg_temp_free_i32(tmp);
60011498
PB
3092 break;
3093 case 5: /* vcvtt.f32.f16 */
60011498
PB
3094 tmp = gen_vfp_mrs();
3095 tcg_gen_shri_i32(tmp, tmp, 16);
3096 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3097 tcg_temp_free_i32(tmp);
60011498
PB
3098 break;
3099 case 6: /* vcvtb.f16.f32 */
7d1b0095 3100 tmp = tcg_temp_new_i32();
60011498
PB
3101 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3102 gen_mov_F0_vreg(0, rd);
3103 tmp2 = gen_vfp_mrs();
3104 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3105 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3106 tcg_temp_free_i32(tmp2);
60011498
PB
3107 gen_vfp_msr(tmp);
3108 break;
3109 case 7: /* vcvtt.f16.f32 */
7d1b0095 3110 tmp = tcg_temp_new_i32();
60011498
PB
3111 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3112 tcg_gen_shli_i32(tmp, tmp, 16);
3113 gen_mov_F0_vreg(0, rd);
3114 tmp2 = gen_vfp_mrs();
3115 tcg_gen_ext16u_i32(tmp2, tmp2);
3116 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3117 tcg_temp_free_i32(tmp2);
60011498
PB
3118 gen_vfp_msr(tmp);
3119 break;
b7bcbe95
FB
3120 case 8: /* cmp */
3121 gen_vfp_cmp(dp);
3122 break;
3123 case 9: /* cmpe */
3124 gen_vfp_cmpe(dp);
3125 break;
3126 case 10: /* cmpz */
3127 gen_vfp_cmp(dp);
3128 break;
3129 case 11: /* cmpez */
3130 gen_vfp_F1_ld0(dp);
3131 gen_vfp_cmpe(dp);
3132 break;
3133 case 15: /* single<->double conversion */
3134 if (dp)
4373f3ce 3135 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3136 else
4373f3ce 3137 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3138 break;
3139 case 16: /* fuito */
5500b06c 3140 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3141 break;
3142 case 17: /* fsito */
5500b06c 3143 gen_vfp_sito(dp, 0);
b7bcbe95 3144 break;
9ee6e8bb
PB
3145 case 20: /* fshto */
3146 if (!arm_feature(env, ARM_FEATURE_VFP3))
3147 return 1;
5500b06c 3148 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3149 break;
3150 case 21: /* fslto */
3151 if (!arm_feature(env, ARM_FEATURE_VFP3))
3152 return 1;
5500b06c 3153 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3154 break;
3155 case 22: /* fuhto */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
5500b06c 3158 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3159 break;
3160 case 23: /* fulto */
3161 if (!arm_feature(env, ARM_FEATURE_VFP3))
3162 return 1;
5500b06c 3163 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3164 break;
b7bcbe95 3165 case 24: /* ftoui */
5500b06c 3166 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3167 break;
3168 case 25: /* ftouiz */
5500b06c 3169 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3170 break;
3171 case 26: /* ftosi */
5500b06c 3172 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3173 break;
3174 case 27: /* ftosiz */
5500b06c 3175 gen_vfp_tosiz(dp, 0);
b7bcbe95 3176 break;
9ee6e8bb
PB
3177 case 28: /* ftosh */
3178 if (!arm_feature(env, ARM_FEATURE_VFP3))
3179 return 1;
5500b06c 3180 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3181 break;
3182 case 29: /* ftosl */
3183 if (!arm_feature(env, ARM_FEATURE_VFP3))
3184 return 1;
5500b06c 3185 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3186 break;
3187 case 30: /* ftouh */
3188 if (!arm_feature(env, ARM_FEATURE_VFP3))
3189 return 1;
5500b06c 3190 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3191 break;
3192 case 31: /* ftoul */
3193 if (!arm_feature(env, ARM_FEATURE_VFP3))
3194 return 1;
5500b06c 3195 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3196 break;
b7bcbe95 3197 default: /* undefined */
b7bcbe95
FB
3198 return 1;
3199 }
3200 break;
3201 default: /* undefined */
b7bcbe95
FB
3202 return 1;
3203 }
3204
3205 /* Write back the result. */
3206 if (op == 15 && (rn >= 8 && rn <= 11))
3207 ; /* Comparison, do nothing. */
04595bf6
PM
3208 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3209 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3210 gen_mov_vreg_F0(0, rd);
3211 else if (op == 15 && rn == 15)
3212 /* conversion */
3213 gen_mov_vreg_F0(!dp, rd);
3214 else
3215 gen_mov_vreg_F0(dp, rd);
3216
3217 /* break out of the loop if we have finished */
3218 if (veclen == 0)
3219 break;
3220
3221 if (op == 15 && delta_m == 0) {
3222 /* single source one-many */
3223 while (veclen--) {
3224 rd = ((rd + delta_d) & (bank_mask - 1))
3225 | (rd & bank_mask);
3226 gen_mov_vreg_F0(dp, rd);
3227 }
3228 break;
3229 }
3230 /* Setup the next operands. */
3231 veclen--;
3232 rd = ((rd + delta_d) & (bank_mask - 1))
3233 | (rd & bank_mask);
3234
3235 if (op == 15) {
3236 /* One source operand. */
3237 rm = ((rm + delta_m) & (bank_mask - 1))
3238 | (rm & bank_mask);
3239 gen_mov_F0_vreg(dp, rm);
3240 } else {
3241 /* Two source operands. */
3242 rn = ((rn + delta_d) & (bank_mask - 1))
3243 | (rn & bank_mask);
3244 gen_mov_F0_vreg(dp, rn);
3245 if (delta_m) {
3246 rm = ((rm + delta_m) & (bank_mask - 1))
3247 | (rm & bank_mask);
3248 gen_mov_F1_vreg(dp, rm);
3249 }
3250 }
3251 }
3252 }
3253 break;
3254 case 0xc:
3255 case 0xd:
8387da81 3256 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3257 /* two-register transfer */
3258 rn = (insn >> 16) & 0xf;
3259 rd = (insn >> 12) & 0xf;
3260 if (dp) {
9ee6e8bb
PB
3261 VFP_DREG_M(rm, insn);
3262 } else {
3263 rm = VFP_SREG_M(insn);
3264 }
b7bcbe95 3265
18c9b560 3266 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3267 /* vfp->arm */
3268 if (dp) {
4373f3ce
PB
3269 gen_mov_F0_vreg(0, rm * 2);
3270 tmp = gen_vfp_mrs();
3271 store_reg(s, rd, tmp);
3272 gen_mov_F0_vreg(0, rm * 2 + 1);
3273 tmp = gen_vfp_mrs();
3274 store_reg(s, rn, tmp);
b7bcbe95
FB
3275 } else {
3276 gen_mov_F0_vreg(0, rm);
4373f3ce 3277 tmp = gen_vfp_mrs();
8387da81 3278 store_reg(s, rd, tmp);
b7bcbe95 3279 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3280 tmp = gen_vfp_mrs();
8387da81 3281 store_reg(s, rn, tmp);
b7bcbe95
FB
3282 }
3283 } else {
3284 /* arm->vfp */
3285 if (dp) {
4373f3ce
PB
3286 tmp = load_reg(s, rd);
3287 gen_vfp_msr(tmp);
3288 gen_mov_vreg_F0(0, rm * 2);
3289 tmp = load_reg(s, rn);
3290 gen_vfp_msr(tmp);
3291 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3292 } else {
8387da81 3293 tmp = load_reg(s, rd);
4373f3ce 3294 gen_vfp_msr(tmp);
b7bcbe95 3295 gen_mov_vreg_F0(0, rm);
8387da81 3296 tmp = load_reg(s, rn);
4373f3ce 3297 gen_vfp_msr(tmp);
b7bcbe95
FB
3298 gen_mov_vreg_F0(0, rm + 1);
3299 }
3300 }
3301 } else {
3302 /* Load/store */
3303 rn = (insn >> 16) & 0xf;
3304 if (dp)
9ee6e8bb 3305 VFP_DREG_D(rd, insn);
b7bcbe95 3306 else
9ee6e8bb 3307 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3308 if ((insn & 0x01200000) == 0x01000000) {
3309 /* Single load/store */
3310 offset = (insn & 0xff) << 2;
3311 if ((insn & (1 << 23)) == 0)
3312 offset = -offset;
934814f1
PM
3313 if (s->thumb && rn == 15) {
3314 /* This is actually UNPREDICTABLE */
3315 addr = tcg_temp_new_i32();
3316 tcg_gen_movi_i32(addr, s->pc & ~2);
3317 } else {
3318 addr = load_reg(s, rn);
3319 }
312eea9f 3320 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3321 if (insn & (1 << 20)) {
312eea9f 3322 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3323 gen_mov_vreg_F0(dp, rd);
3324 } else {
3325 gen_mov_F0_vreg(dp, rd);
312eea9f 3326 gen_vfp_st(s, dp, addr);
b7bcbe95 3327 }
7d1b0095 3328 tcg_temp_free_i32(addr);
b7bcbe95
FB
3329 } else {
3330 /* load/store multiple */
934814f1 3331 int w = insn & (1 << 21);
b7bcbe95
FB
3332 if (dp)
3333 n = (insn >> 1) & 0x7f;
3334 else
3335 n = insn & 0xff;
3336
934814f1
PM
3337 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3338 /* P == U , W == 1 => UNDEF */
3339 return 1;
3340 }
3341 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3342 /* UNPREDICTABLE cases for bad immediates: we choose to
3343 * UNDEF to avoid generating huge numbers of TCG ops
3344 */
3345 return 1;
3346 }
3347 if (rn == 15 && w) {
3348 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3349 return 1;
3350 }
3351
3352 if (s->thumb && rn == 15) {
3353 /* This is actually UNPREDICTABLE */
3354 addr = tcg_temp_new_i32();
3355 tcg_gen_movi_i32(addr, s->pc & ~2);
3356 } else {
3357 addr = load_reg(s, rn);
3358 }
b7bcbe95 3359 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3360 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3361
3362 if (dp)
3363 offset = 8;
3364 else
3365 offset = 4;
3366 for (i = 0; i < n; i++) {
18c9b560 3367 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3368 /* load */
312eea9f 3369 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3370 gen_mov_vreg_F0(dp, rd + i);
3371 } else {
3372 /* store */
3373 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3374 gen_vfp_st(s, dp, addr);
b7bcbe95 3375 }
312eea9f 3376 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3377 }
934814f1 3378 if (w) {
b7bcbe95
FB
3379 /* writeback */
3380 if (insn & (1 << 24))
3381 offset = -offset * n;
3382 else if (dp && (insn & 1))
3383 offset = 4;
3384 else
3385 offset = 0;
3386
3387 if (offset != 0)
312eea9f
FN
3388 tcg_gen_addi_i32(addr, addr, offset);
3389 store_reg(s, rn, addr);
3390 } else {
7d1b0095 3391 tcg_temp_free_i32(addr);
b7bcbe95
FB
3392 }
3393 }
3394 }
3395 break;
3396 default:
3397 /* Should never happen. */
3398 return 1;
3399 }
3400 return 0;
3401}
3402
6e256c93 3403static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3404{
6e256c93
FB
3405 TranslationBlock *tb;
3406
3407 tb = s->tb;
3408 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3409 tcg_gen_goto_tb(n);
8984bd2e 3410 gen_set_pc_im(dest);
4b4a72e5 3411 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3412 } else {
8984bd2e 3413 gen_set_pc_im(dest);
57fec1fe 3414 tcg_gen_exit_tb(0);
6e256c93 3415 }
c53be334
FB
3416}
3417
8aaca4c0
FB
3418static inline void gen_jmp (DisasContext *s, uint32_t dest)
3419{
551bd27f 3420 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3421 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3422 if (s->thumb)
d9ba4830
PB
3423 dest |= 1;
3424 gen_bx_im(s, dest);
8aaca4c0 3425 } else {
6e256c93 3426 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3427 s->is_jmp = DISAS_TB_JUMP;
3428 }
3429}
3430
d9ba4830 3431static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3432{
ee097184 3433 if (x)
d9ba4830 3434 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3435 else
d9ba4830 3436 gen_sxth(t0);
ee097184 3437 if (y)
d9ba4830 3438 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3439 else
d9ba4830
PB
3440 gen_sxth(t1);
3441 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3442}
3443
3444/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3445static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3446 uint32_t mask;
3447
3448 mask = 0;
3449 if (flags & (1 << 0))
3450 mask |= 0xff;
3451 if (flags & (1 << 1))
3452 mask |= 0xff00;
3453 if (flags & (1 << 2))
3454 mask |= 0xff0000;
3455 if (flags & (1 << 3))
3456 mask |= 0xff000000;
9ee6e8bb 3457
2ae23e75 3458 /* Mask out undefined bits. */
9ee6e8bb 3459 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3460 if (!arm_feature(env, ARM_FEATURE_V4T))
3461 mask &= ~CPSR_T;
3462 if (!arm_feature(env, ARM_FEATURE_V5))
3463 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3464 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3465 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3466 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3467 mask &= ~CPSR_IT;
9ee6e8bb 3468 /* Mask out execution state bits. */
2ae23e75 3469 if (!spsr)
e160c51c 3470 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3471 /* Mask out privileged bits. */
3472 if (IS_USER(s))
9ee6e8bb 3473 mask &= CPSR_USER;
b5ff1b31
FB
3474 return mask;
3475}
3476
2fbac54b
FN
3477/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3478static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3479{
d9ba4830 3480 TCGv tmp;
b5ff1b31
FB
3481 if (spsr) {
3482 /* ??? This is also undefined in system mode. */
3483 if (IS_USER(s))
3484 return 1;
d9ba4830
PB
3485
3486 tmp = load_cpu_field(spsr);
3487 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3488 tcg_gen_andi_i32(t0, t0, mask);
3489 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3490 store_cpu_field(tmp, spsr);
b5ff1b31 3491 } else {
2fbac54b 3492 gen_set_cpsr(t0, mask);
b5ff1b31 3493 }
7d1b0095 3494 tcg_temp_free_i32(t0);
b5ff1b31
FB
3495 gen_lookup_tb(s);
3496 return 0;
3497}
3498
2fbac54b
FN
3499/* Returns nonzero if access to the PSR is not permitted. */
3500static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3501{
3502 TCGv tmp;
7d1b0095 3503 tmp = tcg_temp_new_i32();
2fbac54b
FN
3504 tcg_gen_movi_i32(tmp, val);
3505 return gen_set_psr(s, mask, spsr, tmp);
3506}
3507
e9bb4aa9
JR
3508/* Generate an old-style exception return. Marks pc as dead. */
3509static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3510{
d9ba4830 3511 TCGv tmp;
e9bb4aa9 3512 store_reg(s, 15, pc);
d9ba4830
PB
3513 tmp = load_cpu_field(spsr);
3514 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3515 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3516 s->is_jmp = DISAS_UPDATE;
3517}
3518
b0109805
PB
/* Generate a v6 exception return.  Marks both values as dead.
 * Restores the full CPSR from 'cpsr', then writes 'pc' to r15 and ends
 * the TB so execution resumes under the restored state.
 * NOTE(review): CPSR is restored before the PC store — presumably because
 * the CPSR write may switch register banks; keep this order.
 */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3527
9ee6e8bb
PB
3528static inline void
3529gen_set_condexec (DisasContext *s)
3530{
3531 if (s->condexec_mask) {
8f01245e 3532 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3533 TCGv tmp = tcg_temp_new_i32();
8f01245e 3534 tcg_gen_movi_i32(tmp, val);
d9ba4830 3535 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3536 }
3537}
3b46e624 3538
bc4a0de0
PM
/* Raise exception 'excp' for the current instruction: sync the IT state,
 * rewind the PC by 'offset' bytes (back to the faulting insn), then emit
 * the exception and terminate the TB.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3546
9ee6e8bb
PB
3547static void gen_nop_hint(DisasContext *s, int val)
3548{
3549 switch (val) {
3550 case 3: /* wfi */
8984bd2e 3551 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3552 s->is_jmp = DISAS_WFI;
3553 break;
3554 case 2: /* wfe */
3555 case 4: /* sev */
3556 /* TODO: Implement SEV and WFE. May help SMP performance. */
3557 default: /* nop */
3558 break;
3559 }
3560}
99c475ab 3561
/* Shorthand argument triple (dest, src1, src2) = (cpu_V0, cpu_V0, cpu_V1). */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3563
62698be3 3564static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3565{
3566 switch (size) {
dd8fbd78
FN
3567 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3568 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3569 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3570 default: abort();
9ee6e8bb 3571 }
9ee6e8bb
PB
3572}
3573
dd8fbd78 3574static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3575{
3576 switch (size) {
dd8fbd78
FN
3577 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3578 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3579 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3580 default: return;
3581 }
3582}
3583
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
/* (A 32-bit lane occupies a whole register half, so the "pairwise"
 * min/max reduces to the ordinary min/max helper.)
 */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3589
ad69471c
PB
/* Invoke the signed/unsigned, 8/16/32-bit variant of
 * gen_helper_neon_<name> selected by the in-scope (size, u) pair, passing
 * cpu_env.  Operands are the in-scope temps tmp/tmp2; an invalid
 * (size, u) combination makes the enclosing decoder return 1 (UNDEF).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3612
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env.
 * Selects the signed/unsigned, 8/16/32-bit gen_helper_neon_<name> variant
 * from the in-scope (size, u) pair; returns 1 (UNDEF) on invalid combos.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3635
dd8fbd78 3636static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3637{
7d1b0095 3638 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3639 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3640 return tmp;
9ee6e8bb
PB
3641}
3642
dd8fbd78 3643static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3644{
dd8fbd78 3645 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3646 tcg_temp_free_i32(var);
9ee6e8bb
PB
3647}
3648
dd8fbd78 3649static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3650{
dd8fbd78 3651 TCGv tmp;
9ee6e8bb 3652 if (size == 1) {
0fad6efc
PM
3653 tmp = neon_load_reg(reg & 7, reg >> 4);
3654 if (reg & 8) {
dd8fbd78 3655 gen_neon_dup_high16(tmp);
0fad6efc
PM
3656 } else {
3657 gen_neon_dup_low16(tmp);
dd8fbd78 3658 }
0fad6efc
PM
3659 } else {
3660 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3661 }
dd8fbd78 3662 return tmp;
9ee6e8bb
PB
3663}
3664
02acedf9 3665static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3666{
02acedf9 3667 TCGv tmp, tmp2;
600b828c 3668 if (!q && size == 2) {
02acedf9
PM
3669 return 1;
3670 }
3671 tmp = tcg_const_i32(rd);
3672 tmp2 = tcg_const_i32(rm);
3673 if (q) {
3674 switch (size) {
3675 case 0:
02da0b2d 3676 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3677 break;
3678 case 1:
02da0b2d 3679 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3680 break;
3681 case 2:
02da0b2d 3682 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3683 break;
3684 default:
3685 abort();
3686 }
3687 } else {
3688 switch (size) {
3689 case 0:
02da0b2d 3690 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3691 break;
3692 case 1:
02da0b2d 3693 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3694 break;
3695 default:
3696 abort();
3697 }
3698 }
3699 tcg_temp_free_i32(tmp);
3700 tcg_temp_free_i32(tmp2);
3701 return 0;
19457615
FN
3702}
3703
d68a6f3a 3704static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3705{
3706 TCGv tmp, tmp2;
600b828c 3707 if (!q && size == 2) {
d68a6f3a
PM
3708 return 1;
3709 }
3710 tmp = tcg_const_i32(rd);
3711 tmp2 = tcg_const_i32(rm);
3712 if (q) {
3713 switch (size) {
3714 case 0:
02da0b2d 3715 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3716 break;
3717 case 1:
02da0b2d 3718 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3719 break;
3720 case 2:
02da0b2d 3721 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3722 break;
3723 default:
3724 abort();
3725 }
3726 } else {
3727 switch (size) {
3728 case 0:
02da0b2d 3729 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3730 break;
3731 case 1:
02da0b2d 3732 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3733 break;
3734 default:
3735 abort();
3736 }
3737 }
3738 tcg_temp_free_i32(tmp);
3739 tcg_temp_free_i32(tmp2);
3740 return 0;
19457615
FN
3741}
3742
19457615
FN
3743static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3744{
3745 TCGv rd, tmp;
3746
7d1b0095
PM
3747 rd = tcg_temp_new_i32();
3748 tmp = tcg_temp_new_i32();
19457615
FN
3749
3750 tcg_gen_shli_i32(rd, t0, 8);
3751 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3752 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3753 tcg_gen_or_i32(rd, rd, tmp);
3754
3755 tcg_gen_shri_i32(t1, t1, 8);
3756 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3757 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3758 tcg_gen_or_i32(t1, t1, tmp);
3759 tcg_gen_mov_i32(t0, rd);
3760
7d1b0095
PM
3761 tcg_temp_free_i32(tmp);
3762 tcg_temp_free_i32(rd);
19457615
FN
3763}
3764
3765static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3766{
3767 TCGv rd, tmp;
3768
7d1b0095
PM
3769 rd = tcg_temp_new_i32();
3770 tmp = tcg_temp_new_i32();
19457615
FN
3771
3772 tcg_gen_shli_i32(rd, t0, 16);
3773 tcg_gen_andi_i32(tmp, t1, 0xffff);
3774 tcg_gen_or_i32(rd, rd, tmp);
3775 tcg_gen_shri_i32(t1, t1, 16);
3776 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3777 tcg_gen_or_i32(t1, t1, tmp);
3778 tcg_gen_mov_i32(t0, rd);
3779
7d1b0095
PM
3780 tcg_temp_free_i32(tmp);
3781 tcg_temp_free_i32(rd);
19457615
FN
3782}
3783
3784
9ee6e8bb
PB
/* Shape table for NEON "load/store multiple structures", indexed by the
 * instruction's op field.  Fields:
 *   nregs      - number of registers transferred
 *   interleave - element interleave factor across those registers
 *   spacing    - register-number spacing (1 = consecutive, 2 = alternate)
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1}, /* 4 regs, 4-way interleave, consecutive */
    {4, 4, 2}, /* 4 regs, 4-way interleave, alternate */
    {4, 1, 1}, /* 4 regs, no interleave */
    {4, 2, 1}, /* 4 regs, 2-way interleave */
    {3, 3, 1}, /* 3 regs, 3-way interleave, consecutive */
    {3, 3, 2}, /* 3 regs, 3-way interleave, alternate */
    {3, 1, 1}, /* 3 regs, no interleave */
    {1, 1, 1}, /* 1 reg */
    {2, 2, 1}, /* 2 regs, 2-way interleave, consecutive */
    {2, 2, 2}, /* 2 regs, 2-way interleave, alternate */
    {2, 1, 1}  /* 2 regs, no interleave */
};
3802
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Three major forms are handled:
     - load/store multiple structures (bit 23 clear);
     - load single element to all lanes (bit 23 set, size field == 3);
     - load/store a single element to one lane (bit 23 set otherwise).
   Post-indexed writeback to the base register happens at the end when
   rm != 15. */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    /* NEON shares the VFP enable bit. */
    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;     /* base address register */
    rm = insn & 0xf;             /* writeback/index register (15 = none) */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        /* Look up the transfer shape for this op value. */
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        /* Address step between consecutive elements of one register. */
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base with a per-register
             * offset rather than continuing linearly. */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access. */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Two 32-bit passes cover one D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one word. */
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses packed into one word. */
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Sub-word element: merge into the existing lane. */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Post-indexed writeback: rm == 15 means none, rm == 13 means add the
     * transfer size, anything else adds register rm. */
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4111
8f8e3aa4
PB
4112/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4113static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4114{
4115 tcg_gen_and_i32(t, t, c);
f669df27 4116 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4117 tcg_gen_or_i32(dest, t, f);
4118}
4119
a7812ae4 4120static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4121{
4122 switch (size) {
4123 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4124 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4125 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4126 default: abort();
4127 }
4128}
4129
a7812ae4 4130static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4131{
4132 switch (size) {
02da0b2d
PM
4133 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4134 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4135 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4136 default: abort();
4137 }
4138}
4139
a7812ae4 4140static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4141{
4142 switch (size) {
02da0b2d
PM
4143 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4144 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4145 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4146 default: abort();
4147 }
4148}
4149
af1bbf30
JR
4150static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
4151{
4152 switch (size) {
02da0b2d
PM
4153 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4154 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4155 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4156 default: abort();
4157 }
4158}
4159
ad69471c
PB
4160static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4161 int q, int u)
4162{
4163 if (q) {
4164 if (u) {
4165 switch (size) {
4166 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4167 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4168 default: abort();
4169 }
4170 } else {
4171 switch (size) {
4172 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4173 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4174 default: abort();
4175 }
4176 }
4177 } else {
4178 if (u) {
4179 switch (size) {
b408a9b0
CL
4180 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4181 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4182 default: abort();
4183 }
4184 } else {
4185 switch (size) {
4186 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4187 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4188 default: abort();
4189 }
4190 }
4191 }
4192}
4193
a7812ae4 4194static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4195{
4196 if (u) {
4197 switch (size) {
4198 case 0: gen_helper_neon_widen_u8(dest, src); break;
4199 case 1: gen_helper_neon_widen_u16(dest, src); break;
4200 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4201 default: abort();
4202 }
4203 } else {
4204 switch (size) {
4205 case 0: gen_helper_neon_widen_s8(dest, src); break;
4206 case 1: gen_helper_neon_widen_s16(dest, src); break;
4207 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4208 default: abort();
4209 }
4210 }
7d1b0095 4211 tcg_temp_free_i32(src);
ad69471c
PB
4212}
4213
4214static inline void gen_neon_addl(int size)
4215{
4216 switch (size) {
4217 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4218 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4219 case 2: tcg_gen_add_i64(CPU_V001); break;
4220 default: abort();
4221 }
4222}
4223
4224static inline void gen_neon_subl(int size)
4225{
4226 switch (size) {
4227 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4228 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4229 case 2: tcg_gen_sub_i64(CPU_V001); break;
4230 default: abort();
4231 }
4232}
4233
a7812ae4 4234static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4235{
4236 switch (size) {
4237 case 0: gen_helper_neon_negl_u16(var, var); break;
4238 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4239 case 2:
4240 tcg_gen_neg_i64(var, var);
4241 break;
ad69471c
PB
4242 default: abort();
4243 }
4244}
4245
a7812ae4 4246static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4247{
4248 switch (size) {
02da0b2d
PM
4249 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4250 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4251 default: abort();
4252 }
4253}
4254
a7812ae4 4255static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4256{
a7812ae4 4257 TCGv_i64 tmp;
ad69471c
PB
4258
4259 switch ((size << 1) | u) {
4260 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4261 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4262 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4263 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4264 case 4:
4265 tmp = gen_muls_i64_i32(a, b);
4266 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4267 tcg_temp_free_i64(tmp);
ad69471c
PB
4268 break;
4269 case 5:
4270 tmp = gen_mulu_i64_i32(a, b);
4271 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4272 tcg_temp_free_i64(tmp);
ad69471c
PB
4273 break;
4274 default: abort();
4275 }
c6067f04
CL
4276
4277 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4278 Don't forget to clean them now. */
4279 if (size < 2) {
7d1b0095
PM
4280 tcg_temp_free_i32(a);
4281 tcg_temp_free_i32(b);
c6067f04 4282 }
ad69471c
PB
4283}
4284
c33171c7
PM
4285static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4286{
4287 if (op) {
4288 if (u) {
4289 gen_neon_unarrow_sats(size, dest, src);
4290 } else {
4291 gen_neon_narrow(size, dest, src);
4292 }
4293 } else {
4294 if (u) {
4295 gen_neon_narrow_satu(size, dest, src);
4296 } else {
4297 gen_neon_narrow_sats(size, dest, src);
4298 }
4299 }
4300}
4301
62698be3
PM
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Each entry in this array has bit n set if the 3-reg-same insn allows
 * size value n (otherwise it UNDEFs).  Unallocated op values have no
 * bits set and therefore always UNDEF.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4371
600b828c
PM
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering are unallocated encodings.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4420
4421static int neon_2rm_is_float_op(int op)
4422{
4423 /* Return true if this neon 2reg-misc op is float-to-float */
4424 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4425 op >= NEON_2RM_VRECPE_F);
4426}
4427
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    /* Float ops only exist for 32-bit elements (size 2). */
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4478
9ee6e8bb
PB
4479/* Translate a NEON data processing instruction. Return nonzero if the
4480 instruction is invalid.
ad69471c
PB
4481 We process data in a mixture of 32-bit and 64-bit chunks.
4482 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4483
0ecb72a5 4484static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4485{
4486 int op;
4487 int q;
4488 int rd, rn, rm;
4489 int size;
4490 int shift;
4491 int pass;
4492 int count;
4493 int pairwise;
4494 int u;
ca9a32e4 4495 uint32_t imm, mask;
b75263d6 4496 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4497 TCGv_i64 tmp64;
9ee6e8bb 4498
5df8bac1 4499 if (!s->vfp_enabled)
9ee6e8bb
PB
4500 return 1;
4501 q = (insn & (1 << 6)) != 0;
4502 u = (insn >> 24) & 1;
4503 VFP_DREG_D(rd, insn);
4504 VFP_DREG_N(rn, insn);
4505 VFP_DREG_M(rm, insn);
4506 size = (insn >> 20) & 3;
4507 if ((insn & (1 << 23)) == 0) {
4508 /* Three register same length. */
4509 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4510 /* Catch invalid op and bad size combinations: UNDEF */
4511 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4512 return 1;
4513 }
25f84f79
PM
4514 /* All insns of this form UNDEF for either this condition or the
4515 * superset of cases "Q==1"; we catch the latter later.
4516 */
4517 if (q && ((rd | rn | rm) & 1)) {
4518 return 1;
4519 }
62698be3
PM
4520 if (size == 3 && op != NEON_3R_LOGIC) {
4521 /* 64-bit element instructions. */
9ee6e8bb 4522 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4523 neon_load_reg64(cpu_V0, rn + pass);
4524 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4525 switch (op) {
62698be3 4526 case NEON_3R_VQADD:
9ee6e8bb 4527 if (u) {
02da0b2d
PM
4528 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4529 cpu_V0, cpu_V1);
2c0262af 4530 } else {
02da0b2d
PM
4531 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4532 cpu_V0, cpu_V1);
2c0262af 4533 }
9ee6e8bb 4534 break;
62698be3 4535 case NEON_3R_VQSUB:
9ee6e8bb 4536 if (u) {
02da0b2d
PM
4537 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4538 cpu_V0, cpu_V1);
ad69471c 4539 } else {
02da0b2d
PM
4540 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4541 cpu_V0, cpu_V1);
ad69471c
PB
4542 }
4543 break;
62698be3 4544 case NEON_3R_VSHL:
ad69471c
PB
4545 if (u) {
4546 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4547 } else {
4548 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4549 }
4550 break;
62698be3 4551 case NEON_3R_VQSHL:
ad69471c 4552 if (u) {
02da0b2d
PM
4553 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4554 cpu_V1, cpu_V0);
ad69471c 4555 } else {
02da0b2d
PM
4556 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4557 cpu_V1, cpu_V0);
ad69471c
PB
4558 }
4559 break;
62698be3 4560 case NEON_3R_VRSHL:
ad69471c
PB
4561 if (u) {
4562 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4563 } else {
ad69471c
PB
4564 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4565 }
4566 break;
62698be3 4567 case NEON_3R_VQRSHL:
ad69471c 4568 if (u) {
02da0b2d
PM
4569 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4570 cpu_V1, cpu_V0);
ad69471c 4571 } else {
02da0b2d
PM
4572 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4573 cpu_V1, cpu_V0);
1e8d4eec 4574 }
9ee6e8bb 4575 break;
62698be3 4576 case NEON_3R_VADD_VSUB:
9ee6e8bb 4577 if (u) {
ad69471c 4578 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4579 } else {
ad69471c 4580 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4581 }
4582 break;
4583 default:
4584 abort();
2c0262af 4585 }
ad69471c 4586 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4587 }
9ee6e8bb 4588 return 0;
2c0262af 4589 }
25f84f79 4590 pairwise = 0;
9ee6e8bb 4591 switch (op) {
62698be3
PM
4592 case NEON_3R_VSHL:
4593 case NEON_3R_VQSHL:
4594 case NEON_3R_VRSHL:
4595 case NEON_3R_VQRSHL:
9ee6e8bb 4596 {
ad69471c
PB
4597 int rtmp;
4598 /* Shift instruction operands are reversed. */
4599 rtmp = rn;
9ee6e8bb 4600 rn = rm;
ad69471c 4601 rm = rtmp;
9ee6e8bb 4602 }
2c0262af 4603 break;
25f84f79
PM
4604 case NEON_3R_VPADD:
4605 if (u) {
4606 return 1;
4607 }
4608 /* Fall through */
62698be3
PM
4609 case NEON_3R_VPMAX:
4610 case NEON_3R_VPMIN:
9ee6e8bb 4611 pairwise = 1;
2c0262af 4612 break;
25f84f79
PM
4613 case NEON_3R_FLOAT_ARITH:
4614 pairwise = (u && size < 2); /* if VPADD (float) */
4615 break;
4616 case NEON_3R_FLOAT_MINMAX:
4617 pairwise = u; /* if VPMIN/VPMAX (float) */
4618 break;
4619 case NEON_3R_FLOAT_CMP:
4620 if (!u && size) {
4621 /* no encoding for U=0 C=1x */
4622 return 1;
4623 }
4624 break;
4625 case NEON_3R_FLOAT_ACMP:
4626 if (!u) {
4627 return 1;
4628 }
4629 break;
4630 case NEON_3R_VRECPS_VRSQRTS:
4631 if (u) {
4632 return 1;
4633 }
2c0262af 4634 break;
25f84f79
PM
4635 case NEON_3R_VMUL:
4636 if (u && (size != 0)) {
4637 /* UNDEF on invalid size for polynomial subcase */
4638 return 1;
4639 }
2c0262af 4640 break;
da97f52c
PM
4641 case NEON_3R_VFM:
4642 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4643 return 1;
4644 }
4645 break;
9ee6e8bb 4646 default:
2c0262af 4647 break;
9ee6e8bb 4648 }
dd8fbd78 4649
25f84f79
PM
4650 if (pairwise && q) {
4651 /* All the pairwise insns UNDEF if Q is set */
4652 return 1;
4653 }
4654
9ee6e8bb
PB
4655 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4656
4657 if (pairwise) {
4658 /* Pairwise. */
a5a14945
JR
4659 if (pass < 1) {
4660 tmp = neon_load_reg(rn, 0);
4661 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4662 } else {
a5a14945
JR
4663 tmp = neon_load_reg(rm, 0);
4664 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4665 }
4666 } else {
4667 /* Elementwise. */
dd8fbd78
FN
4668 tmp = neon_load_reg(rn, pass);
4669 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4670 }
4671 switch (op) {
62698be3 4672 case NEON_3R_VHADD:
9ee6e8bb
PB
4673 GEN_NEON_INTEGER_OP(hadd);
4674 break;
62698be3 4675 case NEON_3R_VQADD:
02da0b2d 4676 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4677 break;
62698be3 4678 case NEON_3R_VRHADD:
9ee6e8bb 4679 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4680 break;
62698be3 4681 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4682 switch ((u << 2) | size) {
4683 case 0: /* VAND */
dd8fbd78 4684 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4685 break;
4686 case 1: /* BIC */
f669df27 4687 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4688 break;
4689 case 2: /* VORR */
dd8fbd78 4690 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4691 break;
4692 case 3: /* VORN */
f669df27 4693 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4694 break;
4695 case 4: /* VEOR */
dd8fbd78 4696 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4697 break;
4698 case 5: /* VBSL */
dd8fbd78
FN
4699 tmp3 = neon_load_reg(rd, pass);
4700 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4701 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4702 break;
4703 case 6: /* VBIT */
dd8fbd78
FN
4704 tmp3 = neon_load_reg(rd, pass);
4705 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4706 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4707 break;
4708 case 7: /* VBIF */
dd8fbd78
FN
4709 tmp3 = neon_load_reg(rd, pass);
4710 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4711 tcg_temp_free_i32(tmp3);
9ee6e8bb 4712 break;
2c0262af
FB
4713 }
4714 break;
62698be3 4715 case NEON_3R_VHSUB:
9ee6e8bb
PB
4716 GEN_NEON_INTEGER_OP(hsub);
4717 break;
62698be3 4718 case NEON_3R_VQSUB:
02da0b2d 4719 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4720 break;
62698be3 4721 case NEON_3R_VCGT:
9ee6e8bb
PB
4722 GEN_NEON_INTEGER_OP(cgt);
4723 break;
62698be3 4724 case NEON_3R_VCGE:
9ee6e8bb
PB
4725 GEN_NEON_INTEGER_OP(cge);
4726 break;
62698be3 4727 case NEON_3R_VSHL:
ad69471c 4728 GEN_NEON_INTEGER_OP(shl);
2c0262af 4729 break;
62698be3 4730 case NEON_3R_VQSHL:
02da0b2d 4731 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4732 break;
62698be3 4733 case NEON_3R_VRSHL:
ad69471c 4734 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4735 break;
62698be3 4736 case NEON_3R_VQRSHL:
02da0b2d 4737 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4738 break;
62698be3 4739 case NEON_3R_VMAX:
9ee6e8bb
PB
4740 GEN_NEON_INTEGER_OP(max);
4741 break;
62698be3 4742 case NEON_3R_VMIN:
9ee6e8bb
PB
4743 GEN_NEON_INTEGER_OP(min);
4744 break;
62698be3 4745 case NEON_3R_VABD:
9ee6e8bb
PB
4746 GEN_NEON_INTEGER_OP(abd);
4747 break;
62698be3 4748 case NEON_3R_VABA:
9ee6e8bb 4749 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4750 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4751 tmp2 = neon_load_reg(rd, pass);
4752 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4753 break;
62698be3 4754 case NEON_3R_VADD_VSUB:
9ee6e8bb 4755 if (!u) { /* VADD */
62698be3 4756 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4757 } else { /* VSUB */
4758 switch (size) {
dd8fbd78
FN
4759 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4760 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4761 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4762 default: abort();
9ee6e8bb
PB
4763 }
4764 }
4765 break;
62698be3 4766 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4767 if (!u) { /* VTST */
4768 switch (size) {
dd8fbd78
FN
4769 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4770 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4771 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4772 default: abort();
9ee6e8bb
PB
4773 }
4774 } else { /* VCEQ */
4775 switch (size) {
dd8fbd78
FN
4776 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4777 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4778 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4779 default: abort();
9ee6e8bb
PB
4780 }
4781 }
4782 break;
62698be3 4783 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4784 switch (size) {
dd8fbd78
FN
4785 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4786 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4787 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4788 default: abort();
9ee6e8bb 4789 }
7d1b0095 4790 tcg_temp_free_i32(tmp2);
dd8fbd78 4791 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4792 if (u) { /* VMLS */
dd8fbd78 4793 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4794 } else { /* VMLA */
dd8fbd78 4795 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4796 }
4797 break;
62698be3 4798 case NEON_3R_VMUL:
9ee6e8bb 4799 if (u) { /* polynomial */
dd8fbd78 4800 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4801 } else { /* Integer */
4802 switch (size) {
dd8fbd78
FN
4803 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4804 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4805 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4806 default: abort();
9ee6e8bb
PB
4807 }
4808 }
4809 break;
62698be3 4810 case NEON_3R_VPMAX:
9ee6e8bb
PB
4811 GEN_NEON_INTEGER_OP(pmax);
4812 break;
62698be3 4813 case NEON_3R_VPMIN:
9ee6e8bb
PB
4814 GEN_NEON_INTEGER_OP(pmin);
4815 break;
62698be3 4816 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4817 if (!u) { /* VQDMULH */
4818 switch (size) {
02da0b2d
PM
4819 case 1:
4820 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4821 break;
4822 case 2:
4823 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4824 break;
62698be3 4825 default: abort();
9ee6e8bb 4826 }
62698be3 4827 } else { /* VQRDMULH */
9ee6e8bb 4828 switch (size) {
02da0b2d
PM
4829 case 1:
4830 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4831 break;
4832 case 2:
4833 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4834 break;
62698be3 4835 default: abort();
9ee6e8bb
PB
4836 }
4837 }
4838 break;
62698be3 4839 case NEON_3R_VPADD:
9ee6e8bb 4840 switch (size) {
dd8fbd78
FN
4841 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4842 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4843 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4844 default: abort();
9ee6e8bb
PB
4845 }
4846 break;
62698be3 4847 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4848 {
4849 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4850 switch ((u << 2) | size) {
4851 case 0: /* VADD */
aa47cfdd
PM
4852 case 4: /* VPADD */
4853 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4854 break;
4855 case 2: /* VSUB */
aa47cfdd 4856 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4857 break;
4858 case 6: /* VABD */
aa47cfdd 4859 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4860 break;
4861 default:
62698be3 4862 abort();
9ee6e8bb 4863 }
aa47cfdd 4864 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4865 break;
aa47cfdd 4866 }
62698be3 4867 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4868 {
4869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4870 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4871 if (!u) {
7d1b0095 4872 tcg_temp_free_i32(tmp2);
dd8fbd78 4873 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4874 if (size == 0) {
aa47cfdd 4875 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4876 } else {
aa47cfdd 4877 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4878 }
4879 }
aa47cfdd 4880 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4881 break;
aa47cfdd 4882 }
62698be3 4883 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4884 {
4885 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4886 if (!u) {
aa47cfdd 4887 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4888 } else {
aa47cfdd
PM
4889 if (size == 0) {
4890 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4891 } else {
4892 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4893 }
b5ff1b31 4894 }
aa47cfdd 4895 tcg_temp_free_ptr(fpstatus);
2c0262af 4896 break;
aa47cfdd 4897 }
62698be3 4898 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4899 {
4900 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4901 if (size == 0) {
4902 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4903 } else {
4904 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4905 }
4906 tcg_temp_free_ptr(fpstatus);
2c0262af 4907 break;
aa47cfdd 4908 }
62698be3 4909 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4910 {
4911 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4912 if (size == 0) {
4913 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4914 } else {
4915 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4916 }
4917 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4918 break;
aa47cfdd 4919 }
62698be3 4920 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4921 if (size == 0)
dd8fbd78 4922 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4923 else
dd8fbd78 4924 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4925 break;
da97f52c
PM
4926 case NEON_3R_VFM:
4927 {
4928 /* VFMA, VFMS: fused multiply-add */
4929 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4930 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4931 if (size) {
4932 /* VFMS */
4933 gen_helper_vfp_negs(tmp, tmp);
4934 }
4935 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4936 tcg_temp_free_i32(tmp3);
4937 tcg_temp_free_ptr(fpstatus);
4938 break;
4939 }
9ee6e8bb
PB
4940 default:
4941 abort();
2c0262af 4942 }
7d1b0095 4943 tcg_temp_free_i32(tmp2);
dd8fbd78 4944
9ee6e8bb
PB
4945 /* Save the result. For elementwise operations we can put it
4946 straight into the destination register. For pairwise operations
4947 we have to be careful to avoid clobbering the source operands. */
4948 if (pairwise && rd == rm) {
dd8fbd78 4949 neon_store_scratch(pass, tmp);
9ee6e8bb 4950 } else {
dd8fbd78 4951 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4952 }
4953
4954 } /* for pass */
4955 if (pairwise && rd == rm) {
4956 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4957 tmp = neon_load_scratch(pass);
4958 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4959 }
4960 }
ad69471c 4961 /* End of 3 register same size operations. */
9ee6e8bb
PB
4962 } else if (insn & (1 << 4)) {
4963 if ((insn & 0x00380080) != 0) {
4964 /* Two registers and shift. */
4965 op = (insn >> 8) & 0xf;
4966 if (insn & (1 << 7)) {
cc13115b
PM
4967 /* 64-bit shift. */
4968 if (op > 7) {
4969 return 1;
4970 }
9ee6e8bb
PB
4971 size = 3;
4972 } else {
4973 size = 2;
4974 while ((insn & (1 << (size + 19))) == 0)
4975 size--;
4976 }
4977 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4978 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4979 by immediate using the variable shift operations. */
4980 if (op < 8) {
4981 /* Shift by immediate:
4982 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4983 if (q && ((rd | rm) & 1)) {
4984 return 1;
4985 }
4986 if (!u && (op == 4 || op == 6)) {
4987 return 1;
4988 }
9ee6e8bb
PB
4989 /* Right shifts are encoded as N - shift, where N is the
4990 element size in bits. */
4991 if (op <= 4)
4992 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4993 if (size == 3) {
4994 count = q + 1;
4995 } else {
4996 count = q ? 4: 2;
4997 }
4998 switch (size) {
4999 case 0:
5000 imm = (uint8_t) shift;
5001 imm |= imm << 8;
5002 imm |= imm << 16;
5003 break;
5004 case 1:
5005 imm = (uint16_t) shift;
5006 imm |= imm << 16;
5007 break;
5008 case 2:
5009 case 3:
5010 imm = shift;
5011 break;
5012 default:
5013 abort();
5014 }
5015
5016 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5017 if (size == 3) {
5018 neon_load_reg64(cpu_V0, rm + pass);
5019 tcg_gen_movi_i64(cpu_V1, imm);
5020 switch (op) {
5021 case 0: /* VSHR */
5022 case 1: /* VSRA */
5023 if (u)
5024 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5025 else
ad69471c 5026 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5027 break;
ad69471c
PB
5028 case 2: /* VRSHR */
5029 case 3: /* VRSRA */
5030 if (u)
5031 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5032 else
ad69471c 5033 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5034 break;
ad69471c 5035 case 4: /* VSRI */
ad69471c
PB
5036 case 5: /* VSHL, VSLI */
5037 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5038 break;
0322b26e 5039 case 6: /* VQSHLU */
02da0b2d
PM
5040 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5041 cpu_V0, cpu_V1);
ad69471c 5042 break;
0322b26e
PM
5043 case 7: /* VQSHL */
5044 if (u) {
02da0b2d 5045 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5046 cpu_V0, cpu_V1);
5047 } else {
02da0b2d 5048 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5049 cpu_V0, cpu_V1);
5050 }
9ee6e8bb 5051 break;
9ee6e8bb 5052 }
ad69471c
PB
5053 if (op == 1 || op == 3) {
5054 /* Accumulate. */
5371cb81 5055 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5056 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5057 } else if (op == 4 || (op == 5 && u)) {
5058 /* Insert */
923e6509
CL
5059 neon_load_reg64(cpu_V1, rd + pass);
5060 uint64_t mask;
5061 if (shift < -63 || shift > 63) {
5062 mask = 0;
5063 } else {
5064 if (op == 4) {
5065 mask = 0xffffffffffffffffull >> -shift;
5066 } else {
5067 mask = 0xffffffffffffffffull << shift;
5068 }
5069 }
5070 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5071 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5072 }
5073 neon_store_reg64(cpu_V0, rd + pass);
5074 } else { /* size < 3 */
5075 /* Operands in T0 and T1. */
dd8fbd78 5076 tmp = neon_load_reg(rm, pass);
7d1b0095 5077 tmp2 = tcg_temp_new_i32();
dd8fbd78 5078 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5079 switch (op) {
5080 case 0: /* VSHR */
5081 case 1: /* VSRA */
5082 GEN_NEON_INTEGER_OP(shl);
5083 break;
5084 case 2: /* VRSHR */
5085 case 3: /* VRSRA */
5086 GEN_NEON_INTEGER_OP(rshl);
5087 break;
5088 case 4: /* VSRI */
ad69471c
PB
5089 case 5: /* VSHL, VSLI */
5090 switch (size) {
dd8fbd78
FN
5091 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5092 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5093 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5094 default: abort();
ad69471c
PB
5095 }
5096 break;
0322b26e 5097 case 6: /* VQSHLU */
ad69471c 5098 switch (size) {
0322b26e 5099 case 0:
02da0b2d
PM
5100 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5101 tmp, tmp2);
0322b26e
PM
5102 break;
5103 case 1:
02da0b2d
PM
5104 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5105 tmp, tmp2);
0322b26e
PM
5106 break;
5107 case 2:
02da0b2d
PM
5108 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5109 tmp, tmp2);
0322b26e
PM
5110 break;
5111 default:
cc13115b 5112 abort();
ad69471c
PB
5113 }
5114 break;
0322b26e 5115 case 7: /* VQSHL */
02da0b2d 5116 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5117 break;
ad69471c 5118 }
7d1b0095 5119 tcg_temp_free_i32(tmp2);
ad69471c
PB
5120
5121 if (op == 1 || op == 3) {
5122 /* Accumulate. */
dd8fbd78 5123 tmp2 = neon_load_reg(rd, pass);
5371cb81 5124 gen_neon_add(size, tmp, tmp2);
7d1b0095 5125 tcg_temp_free_i32(tmp2);
ad69471c
PB
5126 } else if (op == 4 || (op == 5 && u)) {
5127 /* Insert */
5128 switch (size) {
5129 case 0:
5130 if (op == 4)
ca9a32e4 5131 mask = 0xff >> -shift;
ad69471c 5132 else
ca9a32e4
JR
5133 mask = (uint8_t)(0xff << shift);
5134 mask |= mask << 8;
5135 mask |= mask << 16;
ad69471c
PB
5136 break;
5137 case 1:
5138 if (op == 4)
ca9a32e4 5139 mask = 0xffff >> -shift;
ad69471c 5140 else
ca9a32e4
JR
5141 mask = (uint16_t)(0xffff << shift);
5142 mask |= mask << 16;
ad69471c
PB
5143 break;
5144 case 2:
ca9a32e4
JR
5145 if (shift < -31 || shift > 31) {
5146 mask = 0;
5147 } else {
5148 if (op == 4)
5149 mask = 0xffffffffu >> -shift;
5150 else
5151 mask = 0xffffffffu << shift;
5152 }
ad69471c
PB
5153 break;
5154 default:
5155 abort();
5156 }
dd8fbd78 5157 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5158 tcg_gen_andi_i32(tmp, tmp, mask);
5159 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5160 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5161 tcg_temp_free_i32(tmp2);
ad69471c 5162 }
dd8fbd78 5163 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5164 }
5165 } /* for pass */
5166 } else if (op < 10) {
ad69471c 5167 /* Shift by immediate and narrow:
9ee6e8bb 5168 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5169 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5170 if (rm & 1) {
5171 return 1;
5172 }
9ee6e8bb
PB
5173 shift = shift - (1 << (size + 3));
5174 size++;
92cdfaeb 5175 if (size == 3) {
a7812ae4 5176 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5177 neon_load_reg64(cpu_V0, rm);
5178 neon_load_reg64(cpu_V1, rm + 1);
5179 for (pass = 0; pass < 2; pass++) {
5180 TCGv_i64 in;
5181 if (pass == 0) {
5182 in = cpu_V0;
5183 } else {
5184 in = cpu_V1;
5185 }
ad69471c 5186 if (q) {
0b36f4cd 5187 if (input_unsigned) {
92cdfaeb 5188 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5189 } else {
92cdfaeb 5190 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5191 }
ad69471c 5192 } else {
0b36f4cd 5193 if (input_unsigned) {
92cdfaeb 5194 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5195 } else {
92cdfaeb 5196 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5197 }
ad69471c 5198 }
7d1b0095 5199 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5200 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5201 neon_store_reg(rd, pass, tmp);
5202 } /* for pass */
5203 tcg_temp_free_i64(tmp64);
5204 } else {
5205 if (size == 1) {
5206 imm = (uint16_t)shift;
5207 imm |= imm << 16;
2c0262af 5208 } else {
92cdfaeb
PM
5209 /* size == 2 */
5210 imm = (uint32_t)shift;
5211 }
5212 tmp2 = tcg_const_i32(imm);
5213 tmp4 = neon_load_reg(rm + 1, 0);
5214 tmp5 = neon_load_reg(rm + 1, 1);
5215 for (pass = 0; pass < 2; pass++) {
5216 if (pass == 0) {
5217 tmp = neon_load_reg(rm, 0);
5218 } else {
5219 tmp = tmp4;
5220 }
0b36f4cd
CL
5221 gen_neon_shift_narrow(size, tmp, tmp2, q,
5222 input_unsigned);
92cdfaeb
PM
5223 if (pass == 0) {
5224 tmp3 = neon_load_reg(rm, 1);
5225 } else {
5226 tmp3 = tmp5;
5227 }
0b36f4cd
CL
5228 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5229 input_unsigned);
36aa55dc 5230 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5231 tcg_temp_free_i32(tmp);
5232 tcg_temp_free_i32(tmp3);
5233 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5234 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5235 neon_store_reg(rd, pass, tmp);
5236 } /* for pass */
c6067f04 5237 tcg_temp_free_i32(tmp2);
b75263d6 5238 }
9ee6e8bb 5239 } else if (op == 10) {
cc13115b
PM
5240 /* VSHLL, VMOVL */
5241 if (q || (rd & 1)) {
9ee6e8bb 5242 return 1;
cc13115b 5243 }
ad69471c
PB
5244 tmp = neon_load_reg(rm, 0);
5245 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5246 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5247 if (pass == 1)
5248 tmp = tmp2;
5249
5250 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5251
9ee6e8bb
PB
5252 if (shift != 0) {
5253 /* The shift is less than the width of the source
ad69471c
PB
5254 type, so we can just shift the whole register. */
5255 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5256 /* Widen the result of shift: we need to clear
5257 * the potential overflow bits resulting from
5258 * left bits of the narrow input appearing as
5259 * right bits of left the neighbour narrow
5260 * input. */
ad69471c
PB
5261 if (size < 2 || !u) {
5262 uint64_t imm64;
5263 if (size == 0) {
5264 imm = (0xffu >> (8 - shift));
5265 imm |= imm << 16;
acdf01ef 5266 } else if (size == 1) {
ad69471c 5267 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5268 } else {
5269 /* size == 2 */
5270 imm = 0xffffffff >> (32 - shift);
5271 }
5272 if (size < 2) {
5273 imm64 = imm | (((uint64_t)imm) << 32);
5274 } else {
5275 imm64 = imm;
9ee6e8bb 5276 }
acdf01ef 5277 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5278 }
5279 }
ad69471c 5280 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5281 }
f73534a5 5282 } else if (op >= 14) {
9ee6e8bb 5283 /* VCVT fixed-point. */
cc13115b
PM
5284 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5285 return 1;
5286 }
f73534a5
PM
5287 /* We have already masked out the must-be-1 top bit of imm6,
5288 * hence this 32-shift where the ARM ARM has 64-imm6.
5289 */
5290 shift = 32 - shift;
9ee6e8bb 5291 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5292 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5293 if (!(op & 1)) {
9ee6e8bb 5294 if (u)
5500b06c 5295 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5296 else
5500b06c 5297 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5298 } else {
5299 if (u)
5500b06c 5300 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5301 else
5500b06c 5302 gen_vfp_tosl(0, shift, 1);
2c0262af 5303 }
4373f3ce 5304 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5305 }
5306 } else {
9ee6e8bb
PB
5307 return 1;
5308 }
5309 } else { /* (insn & 0x00380080) == 0 */
5310 int invert;
7d80fee5
PM
5311 if (q && (rd & 1)) {
5312 return 1;
5313 }
9ee6e8bb
PB
5314
5315 op = (insn >> 8) & 0xf;
5316 /* One register and immediate. */
5317 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5318 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5319 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5320 * We choose to not special-case this and will behave as if a
5321 * valid constant encoding of 0 had been given.
5322 */
9ee6e8bb
PB
5323 switch (op) {
5324 case 0: case 1:
5325 /* no-op */
5326 break;
5327 case 2: case 3:
5328 imm <<= 8;
5329 break;
5330 case 4: case 5:
5331 imm <<= 16;
5332 break;
5333 case 6: case 7:
5334 imm <<= 24;
5335 break;
5336 case 8: case 9:
5337 imm |= imm << 16;
5338 break;
5339 case 10: case 11:
5340 imm = (imm << 8) | (imm << 24);
5341 break;
5342 case 12:
8e31209e 5343 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5344 break;
5345 case 13:
5346 imm = (imm << 16) | 0xffff;
5347 break;
5348 case 14:
5349 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5350 if (invert)
5351 imm = ~imm;
5352 break;
5353 case 15:
7d80fee5
PM
5354 if (invert) {
5355 return 1;
5356 }
9ee6e8bb
PB
5357 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5358 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5359 break;
5360 }
5361 if (invert)
5362 imm = ~imm;
5363
9ee6e8bb
PB
5364 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5365 if (op & 1 && op < 12) {
ad69471c 5366 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5367 if (invert) {
5368 /* The immediate value has already been inverted, so
5369 BIC becomes AND. */
ad69471c 5370 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5371 } else {
ad69471c 5372 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5373 }
9ee6e8bb 5374 } else {
ad69471c 5375 /* VMOV, VMVN. */
7d1b0095 5376 tmp = tcg_temp_new_i32();
9ee6e8bb 5377 if (op == 14 && invert) {
a5a14945 5378 int n;
ad69471c
PB
5379 uint32_t val;
5380 val = 0;
9ee6e8bb
PB
5381 for (n = 0; n < 4; n++) {
5382 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5383 val |= 0xff << (n * 8);
9ee6e8bb 5384 }
ad69471c
PB
5385 tcg_gen_movi_i32(tmp, val);
5386 } else {
5387 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5388 }
9ee6e8bb 5389 }
ad69471c 5390 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5391 }
5392 }
e4b3861d 5393 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5394 if (size != 3) {
5395 op = (insn >> 8) & 0xf;
5396 if ((insn & (1 << 6)) == 0) {
5397 /* Three registers of different lengths. */
5398 int src1_wide;
5399 int src2_wide;
5400 int prewiden;
695272dc
PM
5401 /* undefreq: bit 0 : UNDEF if size != 0
5402 * bit 1 : UNDEF if size == 0
5403 * bit 2 : UNDEF if U == 1
5404 * Note that [1:0] set implies 'always UNDEF'
5405 */
5406 int undefreq;
5407 /* prewiden, src1_wide, src2_wide, undefreq */
5408 static const int neon_3reg_wide[16][4] = {
5409 {1, 0, 0, 0}, /* VADDL */
5410 {1, 1, 0, 0}, /* VADDW */
5411 {1, 0, 0, 0}, /* VSUBL */
5412 {1, 1, 0, 0}, /* VSUBW */
5413 {0, 1, 1, 0}, /* VADDHN */
5414 {0, 0, 0, 0}, /* VABAL */
5415 {0, 1, 1, 0}, /* VSUBHN */
5416 {0, 0, 0, 0}, /* VABDL */
5417 {0, 0, 0, 0}, /* VMLAL */
5418 {0, 0, 0, 6}, /* VQDMLAL */
5419 {0, 0, 0, 0}, /* VMLSL */
5420 {0, 0, 0, 6}, /* VQDMLSL */
5421 {0, 0, 0, 0}, /* Integer VMULL */
5422 {0, 0, 0, 2}, /* VQDMULL */
5423 {0, 0, 0, 5}, /* Polynomial VMULL */
5424 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5425 };
5426
5427 prewiden = neon_3reg_wide[op][0];
5428 src1_wide = neon_3reg_wide[op][1];
5429 src2_wide = neon_3reg_wide[op][2];
695272dc 5430 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5431
695272dc
PM
5432 if (((undefreq & 1) && (size != 0)) ||
5433 ((undefreq & 2) && (size == 0)) ||
5434 ((undefreq & 4) && u)) {
5435 return 1;
5436 }
5437 if ((src1_wide && (rn & 1)) ||
5438 (src2_wide && (rm & 1)) ||
5439 (!src2_wide && (rd & 1))) {
ad69471c 5440 return 1;
695272dc 5441 }
ad69471c 5442
9ee6e8bb
PB
5443 /* Avoid overlapping operands. Wide source operands are
5444 always aligned so will never overlap with wide
5445 destinations in problematic ways. */
8f8e3aa4 5446 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5447 tmp = neon_load_reg(rm, 1);
5448 neon_store_scratch(2, tmp);
8f8e3aa4 5449 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5450 tmp = neon_load_reg(rn, 1);
5451 neon_store_scratch(2, tmp);
9ee6e8bb 5452 }
a50f5b91 5453 TCGV_UNUSED(tmp3);
9ee6e8bb 5454 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5455 if (src1_wide) {
5456 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5457 TCGV_UNUSED(tmp);
9ee6e8bb 5458 } else {
ad69471c 5459 if (pass == 1 && rd == rn) {
dd8fbd78 5460 tmp = neon_load_scratch(2);
9ee6e8bb 5461 } else {
ad69471c
PB
5462 tmp = neon_load_reg(rn, pass);
5463 }
5464 if (prewiden) {
5465 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5466 }
5467 }
ad69471c
PB
5468 if (src2_wide) {
5469 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5470 TCGV_UNUSED(tmp2);
9ee6e8bb 5471 } else {
ad69471c 5472 if (pass == 1 && rd == rm) {
dd8fbd78 5473 tmp2 = neon_load_scratch(2);
9ee6e8bb 5474 } else {
ad69471c
PB
5475 tmp2 = neon_load_reg(rm, pass);
5476 }
5477 if (prewiden) {
5478 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5479 }
9ee6e8bb
PB
5480 }
5481 switch (op) {
5482 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5483 gen_neon_addl(size);
9ee6e8bb 5484 break;
79b0e534 5485 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5486 gen_neon_subl(size);
9ee6e8bb
PB
5487 break;
5488 case 5: case 7: /* VABAL, VABDL */
5489 switch ((size << 1) | u) {
ad69471c
PB
5490 case 0:
5491 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5492 break;
5493 case 1:
5494 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5495 break;
5496 case 2:
5497 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5498 break;
5499 case 3:
5500 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5501 break;
5502 case 4:
5503 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5504 break;
5505 case 5:
5506 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5507 break;
9ee6e8bb
PB
5508 default: abort();
5509 }
7d1b0095
PM
5510 tcg_temp_free_i32(tmp2);
5511 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5512 break;
5513 case 8: case 9: case 10: case 11: case 12: case 13:
5514 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5515 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5516 break;
5517 case 14: /* Polynomial VMULL */
e5ca24cb 5518 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5519 tcg_temp_free_i32(tmp2);
5520 tcg_temp_free_i32(tmp);
e5ca24cb 5521 break;
695272dc
PM
5522 default: /* 15 is RESERVED: caught earlier */
5523 abort();
9ee6e8bb 5524 }
ebcd88ce
PM
5525 if (op == 13) {
5526 /* VQDMULL */
5527 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5528 neon_store_reg64(cpu_V0, rd + pass);
5529 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5530 /* Accumulate. */
ebcd88ce 5531 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5532 switch (op) {
4dc064e6
PM
5533 case 10: /* VMLSL */
5534 gen_neon_negl(cpu_V0, size);
5535 /* Fall through */
5536 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5537 gen_neon_addl(size);
9ee6e8bb
PB
5538 break;
5539 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5540 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5541 if (op == 11) {
5542 gen_neon_negl(cpu_V0, size);
5543 }
ad69471c
PB
5544 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5545 break;
9ee6e8bb
PB
5546 default:
5547 abort();
5548 }
ad69471c 5549 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5550 } else if (op == 4 || op == 6) {
5551 /* Narrowing operation. */
7d1b0095 5552 tmp = tcg_temp_new_i32();
79b0e534 5553 if (!u) {
9ee6e8bb 5554 switch (size) {
ad69471c
PB
5555 case 0:
5556 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5557 break;
5558 case 1:
5559 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5560 break;
5561 case 2:
5562 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5563 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5564 break;
9ee6e8bb
PB
5565 default: abort();
5566 }
5567 } else {
5568 switch (size) {
ad69471c
PB
5569 case 0:
5570 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5571 break;
5572 case 1:
5573 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5574 break;
5575 case 2:
5576 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5577 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5578 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5579 break;
9ee6e8bb
PB
5580 default: abort();
5581 }
5582 }
ad69471c
PB
5583 if (pass == 0) {
5584 tmp3 = tmp;
5585 } else {
5586 neon_store_reg(rd, 0, tmp3);
5587 neon_store_reg(rd, 1, tmp);
5588 }
9ee6e8bb
PB
5589 } else {
5590 /* Write back the result. */
ad69471c 5591 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5592 }
5593 }
5594 } else {
3e3326df
PM
5595 /* Two registers and a scalar. NB that for ops of this form
5596 * the ARM ARM labels bit 24 as Q, but it is in our variable
5597 * 'u', not 'q'.
5598 */
5599 if (size == 0) {
5600 return 1;
5601 }
9ee6e8bb 5602 switch (op) {
9ee6e8bb 5603 case 1: /* Float VMLA scalar */
9ee6e8bb 5604 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5605 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5606 if (size == 1) {
5607 return 1;
5608 }
5609 /* fall through */
5610 case 0: /* Integer VMLA scalar */
5611 case 4: /* Integer VMLS scalar */
5612 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5613 case 12: /* VQDMULH scalar */
5614 case 13: /* VQRDMULH scalar */
3e3326df
PM
5615 if (u && ((rd | rn) & 1)) {
5616 return 1;
5617 }
dd8fbd78
FN
5618 tmp = neon_get_scalar(size, rm);
5619 neon_store_scratch(0, tmp);
9ee6e8bb 5620 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5621 tmp = neon_load_scratch(0);
5622 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5623 if (op == 12) {
5624 if (size == 1) {
02da0b2d 5625 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5626 } else {
02da0b2d 5627 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5628 }
5629 } else if (op == 13) {
5630 if (size == 1) {
02da0b2d 5631 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5632 } else {
02da0b2d 5633 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5634 }
5635 } else if (op & 1) {
aa47cfdd
PM
5636 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5637 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5638 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5639 } else {
5640 switch (size) {
dd8fbd78
FN
5641 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5642 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5643 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5644 default: abort();
9ee6e8bb
PB
5645 }
5646 }
7d1b0095 5647 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5648 if (op < 8) {
5649 /* Accumulate. */
dd8fbd78 5650 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5651 switch (op) {
5652 case 0:
dd8fbd78 5653 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5654 break;
5655 case 1:
aa47cfdd
PM
5656 {
5657 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5658 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5659 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5660 break;
aa47cfdd 5661 }
9ee6e8bb 5662 case 4:
dd8fbd78 5663 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5664 break;
5665 case 5:
aa47cfdd
PM
5666 {
5667 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5668 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5669 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5670 break;
aa47cfdd 5671 }
9ee6e8bb
PB
5672 default:
5673 abort();
5674 }
7d1b0095 5675 tcg_temp_free_i32(tmp2);
9ee6e8bb 5676 }
dd8fbd78 5677 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5678 }
5679 break;
9ee6e8bb 5680 case 3: /* VQDMLAL scalar */
9ee6e8bb 5681 case 7: /* VQDMLSL scalar */
9ee6e8bb 5682 case 11: /* VQDMULL scalar */
3e3326df 5683 if (u == 1) {
ad69471c 5684 return 1;
3e3326df
PM
5685 }
5686 /* fall through */
5687 case 2: /* VMLAL sclar */
5688 case 6: /* VMLSL scalar */
5689 case 10: /* VMULL scalar */
5690 if (rd & 1) {
5691 return 1;
5692 }
dd8fbd78 5693 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5694 /* We need a copy of tmp2 because gen_neon_mull
5695 * deletes it during pass 0. */
7d1b0095 5696 tmp4 = tcg_temp_new_i32();
c6067f04 5697 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5698 tmp3 = neon_load_reg(rn, 1);
ad69471c 5699
9ee6e8bb 5700 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5701 if (pass == 0) {
5702 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5703 } else {
dd8fbd78 5704 tmp = tmp3;
c6067f04 5705 tmp2 = tmp4;
9ee6e8bb 5706 }
ad69471c 5707 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5708 if (op != 11) {
5709 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5710 }
9ee6e8bb 5711 switch (op) {
4dc064e6
PM
5712 case 6:
5713 gen_neon_negl(cpu_V0, size);
5714 /* Fall through */
5715 case 2:
ad69471c 5716 gen_neon_addl(size);
9ee6e8bb
PB
5717 break;
5718 case 3: case 7:
ad69471c 5719 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5720 if (op == 7) {
5721 gen_neon_negl(cpu_V0, size);
5722 }
ad69471c 5723 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5724 break;
5725 case 10:
5726 /* no-op */
5727 break;
5728 case 11:
ad69471c 5729 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5730 break;
5731 default:
5732 abort();
5733 }
ad69471c 5734 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5735 }
dd8fbd78 5736
dd8fbd78 5737
9ee6e8bb
PB
5738 break;
5739 default: /* 14 and 15 are RESERVED */
5740 return 1;
5741 }
5742 }
5743 } else { /* size == 3 */
5744 if (!u) {
5745 /* Extract. */
9ee6e8bb 5746 imm = (insn >> 8) & 0xf;
ad69471c
PB
5747
5748 if (imm > 7 && !q)
5749 return 1;
5750
52579ea1
PM
5751 if (q && ((rd | rn | rm) & 1)) {
5752 return 1;
5753 }
5754
ad69471c
PB
5755 if (imm == 0) {
5756 neon_load_reg64(cpu_V0, rn);
5757 if (q) {
5758 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5759 }
ad69471c
PB
5760 } else if (imm == 8) {
5761 neon_load_reg64(cpu_V0, rn + 1);
5762 if (q) {
5763 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5764 }
ad69471c 5765 } else if (q) {
a7812ae4 5766 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5767 if (imm < 8) {
5768 neon_load_reg64(cpu_V0, rn);
a7812ae4 5769 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5770 } else {
5771 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5772 neon_load_reg64(tmp64, rm);
ad69471c
PB
5773 }
5774 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5775 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5776 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5777 if (imm < 8) {
5778 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5779 } else {
ad69471c
PB
5780 neon_load_reg64(cpu_V1, rm + 1);
5781 imm -= 8;
9ee6e8bb 5782 }
ad69471c 5783 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5784 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5785 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5786 tcg_temp_free_i64(tmp64);
ad69471c 5787 } else {
a7812ae4 5788 /* BUGFIX */
ad69471c 5789 neon_load_reg64(cpu_V0, rn);
a7812ae4 5790 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5791 neon_load_reg64(cpu_V1, rm);
a7812ae4 5792 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5793 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5794 }
5795 neon_store_reg64(cpu_V0, rd);
5796 if (q) {
5797 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5798 }
5799 } else if ((insn & (1 << 11)) == 0) {
5800 /* Two register misc. */
5801 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5802 size = (insn >> 18) & 3;
600b828c
PM
5803 /* UNDEF for unknown op values and bad op-size combinations */
5804 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5805 return 1;
5806 }
fc2a9b37
PM
5807 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5808 q && ((rm | rd) & 1)) {
5809 return 1;
5810 }
9ee6e8bb 5811 switch (op) {
600b828c 5812 case NEON_2RM_VREV64:
9ee6e8bb 5813 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5814 tmp = neon_load_reg(rm, pass * 2);
5815 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5816 switch (size) {
dd8fbd78
FN
5817 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5818 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5819 case 2: /* no-op */ break;
5820 default: abort();
5821 }
dd8fbd78 5822 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5823 if (size == 2) {
dd8fbd78 5824 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5825 } else {
9ee6e8bb 5826 switch (size) {
dd8fbd78
FN
5827 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5828 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5829 default: abort();
5830 }
dd8fbd78 5831 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5832 }
5833 }
5834 break;
600b828c
PM
5835 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5836 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5837 for (pass = 0; pass < q + 1; pass++) {
5838 tmp = neon_load_reg(rm, pass * 2);
5839 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5840 tmp = neon_load_reg(rm, pass * 2 + 1);
5841 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5842 switch (size) {
5843 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5844 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5845 case 2: tcg_gen_add_i64(CPU_V001); break;
5846 default: abort();
5847 }
600b828c 5848 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5849 /* Accumulate. */
ad69471c
PB
5850 neon_load_reg64(cpu_V1, rd + pass);
5851 gen_neon_addl(size);
9ee6e8bb 5852 }
ad69471c 5853 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5854 }
5855 break;
600b828c 5856 case NEON_2RM_VTRN:
9ee6e8bb 5857 if (size == 2) {
a5a14945 5858 int n;
9ee6e8bb 5859 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5860 tmp = neon_load_reg(rm, n);
5861 tmp2 = neon_load_reg(rd, n + 1);
5862 neon_store_reg(rm, n, tmp2);
5863 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5864 }
5865 } else {
5866 goto elementwise;
5867 }
5868 break;
600b828c 5869 case NEON_2RM_VUZP:
02acedf9 5870 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5871 return 1;
9ee6e8bb
PB
5872 }
5873 break;
600b828c 5874 case NEON_2RM_VZIP:
d68a6f3a 5875 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5876 return 1;
9ee6e8bb
PB
5877 }
5878 break;
600b828c
PM
5879 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5880 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5881 if (rm & 1) {
5882 return 1;
5883 }
a50f5b91 5884 TCGV_UNUSED(tmp2);
9ee6e8bb 5885 for (pass = 0; pass < 2; pass++) {
ad69471c 5886 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5887 tmp = tcg_temp_new_i32();
600b828c
PM
5888 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5889 tmp, cpu_V0);
ad69471c
PB
5890 if (pass == 0) {
5891 tmp2 = tmp;
5892 } else {
5893 neon_store_reg(rd, 0, tmp2);
5894 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5895 }
9ee6e8bb
PB
5896 }
5897 break;
600b828c 5898 case NEON_2RM_VSHLL:
fc2a9b37 5899 if (q || (rd & 1)) {
9ee6e8bb 5900 return 1;
600b828c 5901 }
ad69471c
PB
5902 tmp = neon_load_reg(rm, 0);
5903 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5904 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5905 if (pass == 1)
5906 tmp = tmp2;
5907 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5908 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5909 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5910 }
5911 break;
600b828c 5912 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5913 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5914 q || (rm & 1)) {
5915 return 1;
5916 }
7d1b0095
PM
5917 tmp = tcg_temp_new_i32();
5918 tmp2 = tcg_temp_new_i32();
60011498 5919 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5920 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5921 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5922 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5923 tcg_gen_shli_i32(tmp2, tmp2, 16);
5924 tcg_gen_or_i32(tmp2, tmp2, tmp);
5925 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5926 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5927 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5928 neon_store_reg(rd, 0, tmp2);
7d1b0095 5929 tmp2 = tcg_temp_new_i32();
2d981da7 5930 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5931 tcg_gen_shli_i32(tmp2, tmp2, 16);
5932 tcg_gen_or_i32(tmp2, tmp2, tmp);
5933 neon_store_reg(rd, 1, tmp2);
7d1b0095 5934 tcg_temp_free_i32(tmp);
60011498 5935 break;
600b828c 5936 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5937 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5938 q || (rd & 1)) {
5939 return 1;
5940 }
7d1b0095 5941 tmp3 = tcg_temp_new_i32();
60011498
PB
5942 tmp = neon_load_reg(rm, 0);
5943 tmp2 = neon_load_reg(rm, 1);
5944 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5945 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5946 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5947 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5948 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5949 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5950 tcg_temp_free_i32(tmp);
60011498 5951 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5952 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5953 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5954 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5955 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5956 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5957 tcg_temp_free_i32(tmp2);
5958 tcg_temp_free_i32(tmp3);
60011498 5959 break;
9ee6e8bb
PB
5960 default:
5961 elementwise:
5962 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5963 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5964 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5965 neon_reg_offset(rm, pass));
dd8fbd78 5966 TCGV_UNUSED(tmp);
9ee6e8bb 5967 } else {
dd8fbd78 5968 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5969 }
5970 switch (op) {
600b828c 5971 case NEON_2RM_VREV32:
9ee6e8bb 5972 switch (size) {
dd8fbd78
FN
5973 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5974 case 1: gen_swap_half(tmp); break;
600b828c 5975 default: abort();
9ee6e8bb
PB
5976 }
5977 break;
600b828c 5978 case NEON_2RM_VREV16:
dd8fbd78 5979 gen_rev16(tmp);
9ee6e8bb 5980 break;
600b828c 5981 case NEON_2RM_VCLS:
9ee6e8bb 5982 switch (size) {
dd8fbd78
FN
5983 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5984 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5985 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5986 default: abort();
9ee6e8bb
PB
5987 }
5988 break;
600b828c 5989 case NEON_2RM_VCLZ:
9ee6e8bb 5990 switch (size) {
dd8fbd78
FN
5991 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5992 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5993 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5994 default: abort();
9ee6e8bb
PB
5995 }
5996 break;
600b828c 5997 case NEON_2RM_VCNT:
dd8fbd78 5998 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5999 break;
600b828c 6000 case NEON_2RM_VMVN:
dd8fbd78 6001 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6002 break;
600b828c 6003 case NEON_2RM_VQABS:
9ee6e8bb 6004 switch (size) {
02da0b2d
PM
6005 case 0:
6006 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6007 break;
6008 case 1:
6009 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6010 break;
6011 case 2:
6012 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6013 break;
600b828c 6014 default: abort();
9ee6e8bb
PB
6015 }
6016 break;
600b828c 6017 case NEON_2RM_VQNEG:
9ee6e8bb 6018 switch (size) {
02da0b2d
PM
6019 case 0:
6020 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6021 break;
6022 case 1:
6023 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6024 break;
6025 case 2:
6026 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6027 break;
600b828c 6028 default: abort();
9ee6e8bb
PB
6029 }
6030 break;
600b828c 6031 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6032 tmp2 = tcg_const_i32(0);
9ee6e8bb 6033 switch(size) {
dd8fbd78
FN
6034 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6035 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6036 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6037 default: abort();
9ee6e8bb 6038 }
dd8fbd78 6039 tcg_temp_free(tmp2);
600b828c 6040 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6041 tcg_gen_not_i32(tmp, tmp);
600b828c 6042 }
9ee6e8bb 6043 break;
600b828c 6044 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6045 tmp2 = tcg_const_i32(0);
9ee6e8bb 6046 switch(size) {
dd8fbd78
FN
6047 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6048 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6049 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6050 default: abort();
9ee6e8bb 6051 }
dd8fbd78 6052 tcg_temp_free(tmp2);
600b828c 6053 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6054 tcg_gen_not_i32(tmp, tmp);
600b828c 6055 }
9ee6e8bb 6056 break;
600b828c 6057 case NEON_2RM_VCEQ0:
dd8fbd78 6058 tmp2 = tcg_const_i32(0);
9ee6e8bb 6059 switch(size) {
dd8fbd78
FN
6060 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6061 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6062 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6063 default: abort();
9ee6e8bb 6064 }
dd8fbd78 6065 tcg_temp_free(tmp2);
9ee6e8bb 6066 break;
600b828c 6067 case NEON_2RM_VABS:
9ee6e8bb 6068 switch(size) {
dd8fbd78
FN
6069 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6070 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6071 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6072 default: abort();
9ee6e8bb
PB
6073 }
6074 break;
600b828c 6075 case NEON_2RM_VNEG:
dd8fbd78
FN
6076 tmp2 = tcg_const_i32(0);
6077 gen_neon_rsb(size, tmp, tmp2);
6078 tcg_temp_free(tmp2);
9ee6e8bb 6079 break;
600b828c 6080 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6081 {
6082 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6083 tmp2 = tcg_const_i32(0);
aa47cfdd 6084 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6085 tcg_temp_free(tmp2);
aa47cfdd 6086 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6087 break;
aa47cfdd 6088 }
600b828c 6089 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6090 {
6091 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6092 tmp2 = tcg_const_i32(0);
aa47cfdd 6093 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6094 tcg_temp_free(tmp2);
aa47cfdd 6095 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6096 break;
aa47cfdd 6097 }
600b828c 6098 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6099 {
6100 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6101 tmp2 = tcg_const_i32(0);
aa47cfdd 6102 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6103 tcg_temp_free(tmp2);
aa47cfdd 6104 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6105 break;
aa47cfdd 6106 }
600b828c 6107 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6108 {
6109 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6110 tmp2 = tcg_const_i32(0);
aa47cfdd 6111 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6112 tcg_temp_free(tmp2);
aa47cfdd 6113 tcg_temp_free_ptr(fpstatus);
0e326109 6114 break;
aa47cfdd 6115 }
600b828c 6116 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6117 {
6118 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6119 tmp2 = tcg_const_i32(0);
aa47cfdd 6120 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6121 tcg_temp_free(tmp2);
aa47cfdd 6122 tcg_temp_free_ptr(fpstatus);
0e326109 6123 break;
aa47cfdd 6124 }
600b828c 6125 case NEON_2RM_VABS_F:
4373f3ce 6126 gen_vfp_abs(0);
9ee6e8bb 6127 break;
600b828c 6128 case NEON_2RM_VNEG_F:
4373f3ce 6129 gen_vfp_neg(0);
9ee6e8bb 6130 break;
600b828c 6131 case NEON_2RM_VSWP:
dd8fbd78
FN
6132 tmp2 = neon_load_reg(rd, pass);
6133 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6134 break;
600b828c 6135 case NEON_2RM_VTRN:
dd8fbd78 6136 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6137 switch (size) {
dd8fbd78
FN
6138 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6139 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6140 default: abort();
9ee6e8bb 6141 }
dd8fbd78 6142 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6143 break;
600b828c 6144 case NEON_2RM_VRECPE:
dd8fbd78 6145 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6146 break;
600b828c 6147 case NEON_2RM_VRSQRTE:
dd8fbd78 6148 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6149 break;
600b828c 6150 case NEON_2RM_VRECPE_F:
4373f3ce 6151 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6152 break;
600b828c 6153 case NEON_2RM_VRSQRTE_F:
4373f3ce 6154 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6155 break;
600b828c 6156 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6157 gen_vfp_sito(0, 1);
9ee6e8bb 6158 break;
600b828c 6159 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6160 gen_vfp_uito(0, 1);
9ee6e8bb 6161 break;
600b828c 6162 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6163 gen_vfp_tosiz(0, 1);
9ee6e8bb 6164 break;
600b828c 6165 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6166 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6167 break;
6168 default:
600b828c
PM
6169 /* Reserved op values were caught by the
6170 * neon_2rm_sizes[] check earlier.
6171 */
6172 abort();
9ee6e8bb 6173 }
600b828c 6174 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6175 tcg_gen_st_f32(cpu_F0s, cpu_env,
6176 neon_reg_offset(rd, pass));
9ee6e8bb 6177 } else {
dd8fbd78 6178 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6179 }
6180 }
6181 break;
6182 }
6183 } else if ((insn & (1 << 10)) == 0) {
6184 /* VTBL, VTBX. */
56907d77
PM
6185 int n = ((insn >> 8) & 3) + 1;
6186 if ((rn + n) > 32) {
6187 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6188 * helper function running off the end of the register file.
6189 */
6190 return 1;
6191 }
6192 n <<= 3;
9ee6e8bb 6193 if (insn & (1 << 6)) {
8f8e3aa4 6194 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6195 } else {
7d1b0095 6196 tmp = tcg_temp_new_i32();
8f8e3aa4 6197 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6198 }
8f8e3aa4 6199 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6200 tmp4 = tcg_const_i32(rn);
6201 tmp5 = tcg_const_i32(n);
9ef39277 6202 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6203 tcg_temp_free_i32(tmp);
9ee6e8bb 6204 if (insn & (1 << 6)) {
8f8e3aa4 6205 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6206 } else {
7d1b0095 6207 tmp = tcg_temp_new_i32();
8f8e3aa4 6208 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6209 }
8f8e3aa4 6210 tmp3 = neon_load_reg(rm, 1);
9ef39277 6211 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6212 tcg_temp_free_i32(tmp5);
6213 tcg_temp_free_i32(tmp4);
8f8e3aa4 6214 neon_store_reg(rd, 0, tmp2);
3018f259 6215 neon_store_reg(rd, 1, tmp3);
7d1b0095 6216 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6217 } else if ((insn & 0x380) == 0) {
6218 /* VDUP */
133da6aa
JR
6219 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6220 return 1;
6221 }
9ee6e8bb 6222 if (insn & (1 << 19)) {
dd8fbd78 6223 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6224 } else {
dd8fbd78 6225 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6226 }
6227 if (insn & (1 << 16)) {
dd8fbd78 6228 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6229 } else if (insn & (1 << 17)) {
6230 if ((insn >> 18) & 1)
dd8fbd78 6231 gen_neon_dup_high16(tmp);
9ee6e8bb 6232 else
dd8fbd78 6233 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6234 }
6235 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6236 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6237 tcg_gen_mov_i32(tmp2, tmp);
6238 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6239 }
7d1b0095 6240 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6241 } else {
6242 return 1;
6243 }
6244 }
6245 }
6246 return 0;
6247}
6248
/* Decode and translate one coprocessor instruction (cdp/mcr/mrc/mcrr/mrrc
 * encodings).  Returns 0 if the instruction was handled, nonzero if it
 * should be treated as UNDEF by the caller.
 */
static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    /* XScale: coprocessor access is gated per-coprocessor by cp15.c15_cpar;
     * a clear bit means access traps (UNDEF).
     */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
      case 0:
      case 1:
	/* cp0/cp1: iWMMXt or XScale DSP instructions, if present. */
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    case 10:
    case 11:
	/* cp10/cp11: VFP/Neon. */
	return disas_vfp_insn (env, s, insn);
    default:
        break;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-access coordinates.  64-bit (mcrr/mrrc)
     * accesses have no crn/opc2 and carry a second core register rt2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    /* Look up the register in the cpreg hashtable by its encoding. */
    ri = get_arm_cp_reginfo(cpu,
                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(env, ri, isread)) {
            return 1;
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            /* Sync the PC and end the TB so the CPU can go to sleep. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                /* Three sources, in priority order: constant value,
                 * read callback, or a direct load from CPUARMState.
                 */
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    /* Sync PC first: the helper may raise an exception. */
                    gen_set_pc_im(s->pc);
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into rt (low) and rt2 (high). */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv tmp;
                /* Same three sources as the 64-bit path, 32-bit wide. */
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                /* Assemble rt (low) and rt2 (high) into one 64-bit value. */
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    /* Sync PC first: the helper may raise an exception. */
                    gen_set_pc_im(s->pc);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv tmp;
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
                gen_lookup_tb(s);
            }
        }
        return 0;
    }

    /* Unknown register; the caller will UNDEF. */
    return 1;
}
6424
5e3f878a
PB
6425
6426/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6427static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6428{
6429 TCGv tmp;
7d1b0095 6430 tmp = tcg_temp_new_i32();
5e3f878a
PB
6431 tcg_gen_trunc_i64_i32(tmp, val);
6432 store_reg(s, rlow, tmp);
7d1b0095 6433 tmp = tcg_temp_new_i32();
5e3f878a
PB
6434 tcg_gen_shri_i64(val, val, 32);
6435 tcg_gen_trunc_i64_i32(tmp, val);
6436 store_reg(s, rhigh, tmp);
6437}
6438
6439/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6440static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6441{
a7812ae4 6442 TCGv_i64 tmp;
5e3f878a
PB
6443 TCGv tmp2;
6444
36aa55dc 6445 /* Load value and extend to 64 bits. */
a7812ae4 6446 tmp = tcg_temp_new_i64();
5e3f878a
PB
6447 tmp2 = load_reg(s, rlow);
6448 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6449 tcg_temp_free_i32(tmp2);
5e3f878a 6450 tcg_gen_add_i64(val, val, tmp);
b75263d6 6451 tcg_temp_free_i64(tmp);
5e3f878a
PB
6452}
6453
6454/* load and add a 64-bit value from a register pair. */
a7812ae4 6455static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6456{
a7812ae4 6457 TCGv_i64 tmp;
36aa55dc
PB
6458 TCGv tmpl;
6459 TCGv tmph;
5e3f878a
PB
6460
6461 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6462 tmpl = load_reg(s, rlow);
6463 tmph = load_reg(s, rhigh);
a7812ae4 6464 tmp = tcg_temp_new_i64();
36aa55dc 6465 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6466 tcg_temp_free_i32(tmpl);
6467 tcg_temp_free_i32(tmph);
5e3f878a 6468 tcg_gen_add_i64(val, val, tmp);
b75263d6 6469 tcg_temp_free_i64(tmp);
5e3f878a
PB
6470}
/* Set N and Z flags from hi|lo. */
static void gen_logicq_cc(TCGv lo, TCGv hi)
{
    /* N is bit 63 of the 64-bit result, i.e. the sign bit of hi;
     * cpu_NF holds the flag in its own bit 31.
     */
    tcg_gen_mov_i32(cpu_NF, hi);
    /* The 64-bit result is zero iff both halves are zero, so Z is
     * derived from lo|hi (cpu_ZF uses "zero iff value is zero"
     * semantics — presumably, matching gen_logic_CC; confirm there).
     */
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
6478
426f5abc
PB
6479/* Load/Store exclusive instructions are implemented by remembering
6480 the value/address loaded, and seeing if these are the same
b90372ad 6481 when the store is performed. This should be sufficient to implement
426f5abc
PB
6482 the architecturally mandated semantics, and avoids having to monitor
6483 regular stores.
6484
6485 In system emulation mode only one CPU will be running at once, so
6486 this sequence is effectively atomic. In user emulation mode we
6487 throw an exception and handle the atomic operation elsewhere. */
6488static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6489 TCGv addr, int size)
6490{
6491 TCGv tmp;
6492
6493 switch (size) {
6494 case 0:
6495 tmp = gen_ld8u(addr, IS_USER(s));
6496 break;
6497 case 1:
6498 tmp = gen_ld16u(addr, IS_USER(s));
6499 break;
6500 case 2:
6501 case 3:
6502 tmp = gen_ld32(addr, IS_USER(s));
6503 break;
6504 default:
6505 abort();
6506 }
6507 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6508 store_reg(s, rt, tmp);
6509 if (size == 3) {
7d1b0095 6510 TCGv tmp2 = tcg_temp_new_i32();
2c9adbda
PM
6511 tcg_gen_addi_i32(tmp2, addr, 4);
6512 tmp = gen_ld32(tmp2, IS_USER(s));
7d1b0095 6513 tcg_temp_free_i32(tmp2);
426f5abc
PB
6514 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6515 store_reg(s, rt2, tmp);
6516 }
6517 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6518}
/* clrex: drop any outstanding exclusive access by resetting the
 * recorded exclusive address to -1, which marks the monitor as not
 * holding an address (gen_store_exclusive compares against it).
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
/* User-mode strex: pack the operands into cpu_exclusive_info and raise
 * EXCP_STREX so the cpu-exec loop can perform the store atomically.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    /* Encode size and the three register numbers for the handler. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
#else
/* System-mode strex: succeed (rd = 0) only if the address and the
 * current memory contents still match what the ldrex recorded;
 * otherwise fail (rd = 1) without storing.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail immediately if this is not the monitored address. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    /* Re-load the current memory contents at the monitored address. */
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Fail if memory no longer holds the value the ldrex saw. */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* strexd: check the second (high) word at addr + 4 as well. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* All checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    /* Success: rd = 0. */
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    /* Failure: rd = 1, nothing stored. */
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is cleared. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
#endif
6603
0ecb72a5 6604static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6605{
6606 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6607 TCGv tmp;
3670669c 6608 TCGv tmp2;
6ddbc6e4 6609 TCGv tmp3;
b0109805 6610 TCGv addr;
a7812ae4 6611 TCGv_i64 tmp64;
9ee6e8bb 6612
d31dd73e 6613 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6614 s->pc += 4;
6615
6616 /* M variants do not implement ARM mode. */
6617 if (IS_M(env))
6618 goto illegal_op;
6619 cond = insn >> 28;
6620 if (cond == 0xf){
be5e7a76
DES
6621 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6622 * choose to UNDEF. In ARMv5 and above the space is used
6623 * for miscellaneous unconditional instructions.
6624 */
6625 ARCH(5);
6626
9ee6e8bb
PB
6627 /* Unconditional instructions. */
6628 if (((insn >> 25) & 7) == 1) {
6629 /* NEON Data processing. */
6630 if (!arm_feature(env, ARM_FEATURE_NEON))
6631 goto illegal_op;
6632
6633 if (disas_neon_data_insn(env, s, insn))
6634 goto illegal_op;
6635 return;
6636 }
6637 if ((insn & 0x0f100000) == 0x04000000) {
6638 /* NEON load/store. */
6639 if (!arm_feature(env, ARM_FEATURE_NEON))
6640 goto illegal_op;
6641
6642 if (disas_neon_ls_insn(env, s, insn))
6643 goto illegal_op;
6644 return;
6645 }
3d185e5d
PM
6646 if (((insn & 0x0f30f000) == 0x0510f000) ||
6647 ((insn & 0x0f30f010) == 0x0710f000)) {
6648 if ((insn & (1 << 22)) == 0) {
6649 /* PLDW; v7MP */
6650 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6651 goto illegal_op;
6652 }
6653 }
6654 /* Otherwise PLD; v5TE+ */
be5e7a76 6655 ARCH(5TE);
3d185e5d
PM
6656 return;
6657 }
6658 if (((insn & 0x0f70f000) == 0x0450f000) ||
6659 ((insn & 0x0f70f010) == 0x0650f000)) {
6660 ARCH(7);
6661 return; /* PLI; V7 */
6662 }
6663 if (((insn & 0x0f700000) == 0x04100000) ||
6664 ((insn & 0x0f700010) == 0x06100000)) {
6665 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6666 goto illegal_op;
6667 }
6668 return; /* v7MP: Unallocated memory hint: must NOP */
6669 }
6670
6671 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6672 ARCH(6);
6673 /* setend */
10962fd5
PM
6674 if (((insn >> 9) & 1) != s->bswap_code) {
6675 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6676 goto illegal_op;
6677 }
6678 return;
6679 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6680 switch ((insn >> 4) & 0xf) {
6681 case 1: /* clrex */
6682 ARCH(6K);
426f5abc 6683 gen_clrex(s);
9ee6e8bb
PB
6684 return;
6685 case 4: /* dsb */
6686 case 5: /* dmb */
6687 case 6: /* isb */
6688 ARCH(7);
6689 /* We don't emulate caches so these are a no-op. */
6690 return;
6691 default:
6692 goto illegal_op;
6693 }
6694 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6695 /* srs */
c67b6b71 6696 int32_t offset;
9ee6e8bb
PB
6697 if (IS_USER(s))
6698 goto illegal_op;
6699 ARCH(6);
6700 op1 = (insn & 0x1f);
7d1b0095 6701 addr = tcg_temp_new_i32();
39ea3d4e
PM
6702 tmp = tcg_const_i32(op1);
6703 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6704 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6705 i = (insn >> 23) & 3;
6706 switch (i) {
6707 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6708 case 1: offset = 0; break; /* IA */
6709 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6710 case 3: offset = 4; break; /* IB */
6711 default: abort();
6712 }
6713 if (offset)
b0109805
PB
6714 tcg_gen_addi_i32(addr, addr, offset);
6715 tmp = load_reg(s, 14);
6716 gen_st32(tmp, addr, 0);
c67b6b71 6717 tmp = load_cpu_field(spsr);
b0109805
PB
6718 tcg_gen_addi_i32(addr, addr, 4);
6719 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6720 if (insn & (1 << 21)) {
6721 /* Base writeback. */
6722 switch (i) {
6723 case 0: offset = -8; break;
c67b6b71
FN
6724 case 1: offset = 4; break;
6725 case 2: offset = -4; break;
9ee6e8bb
PB
6726 case 3: offset = 0; break;
6727 default: abort();
6728 }
6729 if (offset)
c67b6b71 6730 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6731 tmp = tcg_const_i32(op1);
6732 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6733 tcg_temp_free_i32(tmp);
7d1b0095 6734 tcg_temp_free_i32(addr);
b0109805 6735 } else {
7d1b0095 6736 tcg_temp_free_i32(addr);
9ee6e8bb 6737 }
a990f58f 6738 return;
ea825eee 6739 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6740 /* rfe */
c67b6b71 6741 int32_t offset;
9ee6e8bb
PB
6742 if (IS_USER(s))
6743 goto illegal_op;
6744 ARCH(6);
6745 rn = (insn >> 16) & 0xf;
b0109805 6746 addr = load_reg(s, rn);
9ee6e8bb
PB
6747 i = (insn >> 23) & 3;
6748 switch (i) {
b0109805 6749 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6750 case 1: offset = 0; break; /* IA */
6751 case 2: offset = -8; break; /* DB */
b0109805 6752 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6753 default: abort();
6754 }
6755 if (offset)
b0109805
PB
6756 tcg_gen_addi_i32(addr, addr, offset);
6757 /* Load PC into tmp and CPSR into tmp2. */
6758 tmp = gen_ld32(addr, 0);
6759 tcg_gen_addi_i32(addr, addr, 4);
6760 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6761 if (insn & (1 << 21)) {
6762 /* Base writeback. */
6763 switch (i) {
b0109805 6764 case 0: offset = -8; break;
c67b6b71
FN
6765 case 1: offset = 4; break;
6766 case 2: offset = -4; break;
b0109805 6767 case 3: offset = 0; break;
9ee6e8bb
PB
6768 default: abort();
6769 }
6770 if (offset)
b0109805
PB
6771 tcg_gen_addi_i32(addr, addr, offset);
6772 store_reg(s, rn, addr);
6773 } else {
7d1b0095 6774 tcg_temp_free_i32(addr);
9ee6e8bb 6775 }
b0109805 6776 gen_rfe(s, tmp, tmp2);
c67b6b71 6777 return;
9ee6e8bb
PB
6778 } else if ((insn & 0x0e000000) == 0x0a000000) {
6779 /* branch link and change to thumb (blx <offset>) */
6780 int32_t offset;
6781
6782 val = (uint32_t)s->pc;
7d1b0095 6783 tmp = tcg_temp_new_i32();
d9ba4830
PB
6784 tcg_gen_movi_i32(tmp, val);
6785 store_reg(s, 14, tmp);
9ee6e8bb
PB
6786 /* Sign-extend the 24-bit offset */
6787 offset = (((int32_t)insn) << 8) >> 8;
6788 /* offset * 4 + bit24 * 2 + (thumb bit) */
6789 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6790 /* pipeline offset */
6791 val += 4;
be5e7a76 6792 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6793 gen_bx_im(s, val);
9ee6e8bb
PB
6794 return;
6795 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6796 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6797 /* iWMMXt register transfer. */
6798 if (env->cp15.c15_cpar & (1 << 1))
6799 if (!disas_iwmmxt_insn(env, s, insn))
6800 return;
6801 }
6802 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6803 /* Coprocessor double register transfer. */
be5e7a76 6804 ARCH(5TE);
9ee6e8bb
PB
6805 } else if ((insn & 0x0f000010) == 0x0e000010) {
6806 /* Additional coprocessor register transfer. */
7997d92f 6807 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6808 uint32_t mask;
6809 uint32_t val;
6810 /* cps (privileged) */
6811 if (IS_USER(s))
6812 return;
6813 mask = val = 0;
6814 if (insn & (1 << 19)) {
6815 if (insn & (1 << 8))
6816 mask |= CPSR_A;
6817 if (insn & (1 << 7))
6818 mask |= CPSR_I;
6819 if (insn & (1 << 6))
6820 mask |= CPSR_F;
6821 if (insn & (1 << 18))
6822 val |= mask;
6823 }
7997d92f 6824 if (insn & (1 << 17)) {
9ee6e8bb
PB
6825 mask |= CPSR_M;
6826 val |= (insn & 0x1f);
6827 }
6828 if (mask) {
2fbac54b 6829 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6830 }
6831 return;
6832 }
6833 goto illegal_op;
6834 }
6835 if (cond != 0xe) {
6836 /* if not always execute, we generate a conditional jump to
6837 next instruction */
6838 s->condlabel = gen_new_label();
d9ba4830 6839 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6840 s->condjmp = 1;
6841 }
6842 if ((insn & 0x0f900000) == 0x03000000) {
6843 if ((insn & (1 << 21)) == 0) {
6844 ARCH(6T2);
6845 rd = (insn >> 12) & 0xf;
6846 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6847 if ((insn & (1 << 22)) == 0) {
6848 /* MOVW */
7d1b0095 6849 tmp = tcg_temp_new_i32();
5e3f878a 6850 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6851 } else {
6852 /* MOVT */
5e3f878a 6853 tmp = load_reg(s, rd);
86831435 6854 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6855 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6856 }
5e3f878a 6857 store_reg(s, rd, tmp);
9ee6e8bb
PB
6858 } else {
6859 if (((insn >> 12) & 0xf) != 0xf)
6860 goto illegal_op;
6861 if (((insn >> 16) & 0xf) == 0) {
6862 gen_nop_hint(s, insn & 0xff);
6863 } else {
6864 /* CPSR = immediate */
6865 val = insn & 0xff;
6866 shift = ((insn >> 8) & 0xf) * 2;
6867 if (shift)
6868 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6869 i = ((insn & (1 << 22)) != 0);
2fbac54b 6870 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6871 goto illegal_op;
6872 }
6873 }
6874 } else if ((insn & 0x0f900000) == 0x01000000
6875 && (insn & 0x00000090) != 0x00000090) {
6876 /* miscellaneous instructions */
6877 op1 = (insn >> 21) & 3;
6878 sh = (insn >> 4) & 0xf;
6879 rm = insn & 0xf;
6880 switch (sh) {
6881 case 0x0: /* move program status register */
6882 if (op1 & 1) {
6883 /* PSR = reg */
2fbac54b 6884 tmp = load_reg(s, rm);
9ee6e8bb 6885 i = ((op1 & 2) != 0);
2fbac54b 6886 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6887 goto illegal_op;
6888 } else {
6889 /* reg = PSR */
6890 rd = (insn >> 12) & 0xf;
6891 if (op1 & 2) {
6892 if (IS_USER(s))
6893 goto illegal_op;
d9ba4830 6894 tmp = load_cpu_field(spsr);
9ee6e8bb 6895 } else {
7d1b0095 6896 tmp = tcg_temp_new_i32();
9ef39277 6897 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6898 }
d9ba4830 6899 store_reg(s, rd, tmp);
9ee6e8bb
PB
6900 }
6901 break;
6902 case 0x1:
6903 if (op1 == 1) {
6904 /* branch/exchange thumb (bx). */
be5e7a76 6905 ARCH(4T);
d9ba4830
PB
6906 tmp = load_reg(s, rm);
6907 gen_bx(s, tmp);
9ee6e8bb
PB
6908 } else if (op1 == 3) {
6909 /* clz */
be5e7a76 6910 ARCH(5);
9ee6e8bb 6911 rd = (insn >> 12) & 0xf;
1497c961
PB
6912 tmp = load_reg(s, rm);
6913 gen_helper_clz(tmp, tmp);
6914 store_reg(s, rd, tmp);
9ee6e8bb
PB
6915 } else {
6916 goto illegal_op;
6917 }
6918 break;
6919 case 0x2:
6920 if (op1 == 1) {
6921 ARCH(5J); /* bxj */
6922 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6923 tmp = load_reg(s, rm);
6924 gen_bx(s, tmp);
9ee6e8bb
PB
6925 } else {
6926 goto illegal_op;
6927 }
6928 break;
6929 case 0x3:
6930 if (op1 != 1)
6931 goto illegal_op;
6932
be5e7a76 6933 ARCH(5);
9ee6e8bb 6934 /* branch link/exchange thumb (blx) */
d9ba4830 6935 tmp = load_reg(s, rm);
7d1b0095 6936 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6937 tcg_gen_movi_i32(tmp2, s->pc);
6938 store_reg(s, 14, tmp2);
6939 gen_bx(s, tmp);
9ee6e8bb
PB
6940 break;
6941 case 0x5: /* saturating add/subtract */
be5e7a76 6942 ARCH(5TE);
9ee6e8bb
PB
6943 rd = (insn >> 12) & 0xf;
6944 rn = (insn >> 16) & 0xf;
b40d0353 6945 tmp = load_reg(s, rm);
5e3f878a 6946 tmp2 = load_reg(s, rn);
9ee6e8bb 6947 if (op1 & 2)
9ef39277 6948 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6949 if (op1 & 1)
9ef39277 6950 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6951 else
9ef39277 6952 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6953 tcg_temp_free_i32(tmp2);
5e3f878a 6954 store_reg(s, rd, tmp);
9ee6e8bb 6955 break;
49e14940
AL
6956 case 7:
6957 /* SMC instruction (op1 == 3)
6958 and undefined instructions (op1 == 0 || op1 == 2)
6959 will trap */
6960 if (op1 != 1) {
6961 goto illegal_op;
6962 }
6963 /* bkpt */
be5e7a76 6964 ARCH(5);
bc4a0de0 6965 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6966 break;
6967 case 0x8: /* signed multiply */
6968 case 0xa:
6969 case 0xc:
6970 case 0xe:
be5e7a76 6971 ARCH(5TE);
9ee6e8bb
PB
6972 rs = (insn >> 8) & 0xf;
6973 rn = (insn >> 12) & 0xf;
6974 rd = (insn >> 16) & 0xf;
6975 if (op1 == 1) {
6976 /* (32 * 16) >> 16 */
5e3f878a
PB
6977 tmp = load_reg(s, rm);
6978 tmp2 = load_reg(s, rs);
9ee6e8bb 6979 if (sh & 4)
5e3f878a 6980 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6981 else
5e3f878a 6982 gen_sxth(tmp2);
a7812ae4
PB
6983 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6984 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6985 tmp = tcg_temp_new_i32();
a7812ae4 6986 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6987 tcg_temp_free_i64(tmp64);
9ee6e8bb 6988 if ((sh & 2) == 0) {
5e3f878a 6989 tmp2 = load_reg(s, rn);
9ef39277 6990 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6991 tcg_temp_free_i32(tmp2);
9ee6e8bb 6992 }
5e3f878a 6993 store_reg(s, rd, tmp);
9ee6e8bb
PB
6994 } else {
6995 /* 16 * 16 */
5e3f878a
PB
6996 tmp = load_reg(s, rm);
6997 tmp2 = load_reg(s, rs);
6998 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 6999 tcg_temp_free_i32(tmp2);
9ee6e8bb 7000 if (op1 == 2) {
a7812ae4
PB
7001 tmp64 = tcg_temp_new_i64();
7002 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7003 tcg_temp_free_i32(tmp);
a7812ae4
PB
7004 gen_addq(s, tmp64, rn, rd);
7005 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7006 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7007 } else {
7008 if (op1 == 0) {
5e3f878a 7009 tmp2 = load_reg(s, rn);
9ef39277 7010 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7011 tcg_temp_free_i32(tmp2);
9ee6e8bb 7012 }
5e3f878a 7013 store_reg(s, rd, tmp);
9ee6e8bb
PB
7014 }
7015 }
7016 break;
7017 default:
7018 goto illegal_op;
7019 }
7020 } else if (((insn & 0x0e000000) == 0 &&
7021 (insn & 0x00000090) != 0x90) ||
7022 ((insn & 0x0e000000) == (1 << 25))) {
7023 int set_cc, logic_cc, shiftop;
7024
7025 op1 = (insn >> 21) & 0xf;
7026 set_cc = (insn >> 20) & 1;
7027 logic_cc = table_logic_cc[op1] & set_cc;
7028
7029 /* data processing instruction */
7030 if (insn & (1 << 25)) {
7031 /* immediate operand */
7032 val = insn & 0xff;
7033 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7034 if (shift) {
9ee6e8bb 7035 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7036 }
7d1b0095 7037 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7038 tcg_gen_movi_i32(tmp2, val);
7039 if (logic_cc && shift) {
7040 gen_set_CF_bit31(tmp2);
7041 }
9ee6e8bb
PB
7042 } else {
7043 /* register */
7044 rm = (insn) & 0xf;
e9bb4aa9 7045 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7046 shiftop = (insn >> 5) & 3;
7047 if (!(insn & (1 << 4))) {
7048 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7049 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7050 } else {
7051 rs = (insn >> 8) & 0xf;
8984bd2e 7052 tmp = load_reg(s, rs);
e9bb4aa9 7053 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7054 }
7055 }
7056 if (op1 != 0x0f && op1 != 0x0d) {
7057 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7058 tmp = load_reg(s, rn);
7059 } else {
7060 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7061 }
7062 rd = (insn >> 12) & 0xf;
7063 switch(op1) {
7064 case 0x00:
e9bb4aa9
JR
7065 tcg_gen_and_i32(tmp, tmp, tmp2);
7066 if (logic_cc) {
7067 gen_logic_CC(tmp);
7068 }
21aeb343 7069 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7070 break;
7071 case 0x01:
e9bb4aa9
JR
7072 tcg_gen_xor_i32(tmp, tmp, tmp2);
7073 if (logic_cc) {
7074 gen_logic_CC(tmp);
7075 }
21aeb343 7076 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7077 break;
7078 case 0x02:
7079 if (set_cc && rd == 15) {
7080 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7081 if (IS_USER(s)) {
9ee6e8bb 7082 goto illegal_op;
e9bb4aa9 7083 }
72485ec4 7084 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7085 gen_exception_return(s, tmp);
9ee6e8bb 7086 } else {
e9bb4aa9 7087 if (set_cc) {
72485ec4 7088 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7089 } else {
7090 tcg_gen_sub_i32(tmp, tmp, tmp2);
7091 }
21aeb343 7092 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7093 }
7094 break;
7095 case 0x03:
e9bb4aa9 7096 if (set_cc) {
72485ec4 7097 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7098 } else {
7099 tcg_gen_sub_i32(tmp, tmp2, tmp);
7100 }
21aeb343 7101 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7102 break;
7103 case 0x04:
e9bb4aa9 7104 if (set_cc) {
72485ec4 7105 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7106 } else {
7107 tcg_gen_add_i32(tmp, tmp, tmp2);
7108 }
21aeb343 7109 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7110 break;
7111 case 0x05:
e9bb4aa9 7112 if (set_cc) {
49b4c31e 7113 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7114 } else {
7115 gen_add_carry(tmp, tmp, tmp2);
7116 }
21aeb343 7117 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7118 break;
7119 case 0x06:
e9bb4aa9 7120 if (set_cc) {
2de68a49 7121 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7122 } else {
7123 gen_sub_carry(tmp, tmp, tmp2);
7124 }
21aeb343 7125 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7126 break;
7127 case 0x07:
e9bb4aa9 7128 if (set_cc) {
2de68a49 7129 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7130 } else {
7131 gen_sub_carry(tmp, tmp2, tmp);
7132 }
21aeb343 7133 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7134 break;
7135 case 0x08:
7136 if (set_cc) {
e9bb4aa9
JR
7137 tcg_gen_and_i32(tmp, tmp, tmp2);
7138 gen_logic_CC(tmp);
9ee6e8bb 7139 }
7d1b0095 7140 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7141 break;
7142 case 0x09:
7143 if (set_cc) {
e9bb4aa9
JR
7144 tcg_gen_xor_i32(tmp, tmp, tmp2);
7145 gen_logic_CC(tmp);
9ee6e8bb 7146 }
7d1b0095 7147 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7148 break;
7149 case 0x0a:
7150 if (set_cc) {
72485ec4 7151 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7152 }
7d1b0095 7153 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7154 break;
7155 case 0x0b:
7156 if (set_cc) {
72485ec4 7157 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7158 }
7d1b0095 7159 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7160 break;
7161 case 0x0c:
e9bb4aa9
JR
7162 tcg_gen_or_i32(tmp, tmp, tmp2);
7163 if (logic_cc) {
7164 gen_logic_CC(tmp);
7165 }
21aeb343 7166 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7167 break;
7168 case 0x0d:
7169 if (logic_cc && rd == 15) {
7170 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7171 if (IS_USER(s)) {
9ee6e8bb 7172 goto illegal_op;
e9bb4aa9
JR
7173 }
7174 gen_exception_return(s, tmp2);
9ee6e8bb 7175 } else {
e9bb4aa9
JR
7176 if (logic_cc) {
7177 gen_logic_CC(tmp2);
7178 }
21aeb343 7179 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7180 }
7181 break;
7182 case 0x0e:
f669df27 7183 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7184 if (logic_cc) {
7185 gen_logic_CC(tmp);
7186 }
21aeb343 7187 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7188 break;
7189 default:
7190 case 0x0f:
e9bb4aa9
JR
7191 tcg_gen_not_i32(tmp2, tmp2);
7192 if (logic_cc) {
7193 gen_logic_CC(tmp2);
7194 }
21aeb343 7195 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7196 break;
7197 }
e9bb4aa9 7198 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7199 tcg_temp_free_i32(tmp2);
e9bb4aa9 7200 }
9ee6e8bb
PB
7201 } else {
7202 /* other instructions */
7203 op1 = (insn >> 24) & 0xf;
7204 switch(op1) {
7205 case 0x0:
7206 case 0x1:
7207 /* multiplies, extra load/stores */
7208 sh = (insn >> 5) & 3;
7209 if (sh == 0) {
7210 if (op1 == 0x0) {
7211 rd = (insn >> 16) & 0xf;
7212 rn = (insn >> 12) & 0xf;
7213 rs = (insn >> 8) & 0xf;
7214 rm = (insn) & 0xf;
7215 op1 = (insn >> 20) & 0xf;
7216 switch (op1) {
7217 case 0: case 1: case 2: case 3: case 6:
7218 /* 32 bit mul */
5e3f878a
PB
7219 tmp = load_reg(s, rs);
7220 tmp2 = load_reg(s, rm);
7221 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7222 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7223 if (insn & (1 << 22)) {
7224 /* Subtract (mls) */
7225 ARCH(6T2);
5e3f878a
PB
7226 tmp2 = load_reg(s, rn);
7227 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7228 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7229 } else if (insn & (1 << 21)) {
7230 /* Add */
5e3f878a
PB
7231 tmp2 = load_reg(s, rn);
7232 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7233 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7234 }
7235 if (insn & (1 << 20))
5e3f878a
PB
7236 gen_logic_CC(tmp);
7237 store_reg(s, rd, tmp);
9ee6e8bb 7238 break;
8aac08b1
AJ
7239 case 4:
7240 /* 64 bit mul double accumulate (UMAAL) */
7241 ARCH(6);
7242 tmp = load_reg(s, rs);
7243 tmp2 = load_reg(s, rm);
7244 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7245 gen_addq_lo(s, tmp64, rn);
7246 gen_addq_lo(s, tmp64, rd);
7247 gen_storeq_reg(s, rn, rd, tmp64);
7248 tcg_temp_free_i64(tmp64);
7249 break;
7250 case 8: case 9: case 10: case 11:
7251 case 12: case 13: case 14: case 15:
7252 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7253 tmp = load_reg(s, rs);
7254 tmp2 = load_reg(s, rm);
8aac08b1 7255 if (insn & (1 << 22)) {
c9f10124 7256 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7257 } else {
c9f10124 7258 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7259 }
7260 if (insn & (1 << 21)) { /* mult accumulate */
c9f10124
RH
7261 TCGv al = load_reg(s, rn);
7262 TCGv ah = load_reg(s, rd);
7263 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7264 tcg_temp_free(al);
7265 tcg_temp_free(ah);
9ee6e8bb 7266 }
8aac08b1 7267 if (insn & (1 << 20)) {
c9f10124 7268 gen_logicq_cc(tmp, tmp2);
8aac08b1 7269 }
c9f10124
RH
7270 store_reg(s, rn, tmp);
7271 store_reg(s, rd, tmp2);
9ee6e8bb 7272 break;
8aac08b1
AJ
7273 default:
7274 goto illegal_op;
9ee6e8bb
PB
7275 }
7276 } else {
7277 rn = (insn >> 16) & 0xf;
7278 rd = (insn >> 12) & 0xf;
7279 if (insn & (1 << 23)) {
7280 /* load/store exclusive */
86753403
PB
7281 op1 = (insn >> 21) & 0x3;
7282 if (op1)
a47f43d2 7283 ARCH(6K);
86753403
PB
7284 else
7285 ARCH(6);
3174f8e9 7286 addr = tcg_temp_local_new_i32();
98a46317 7287 load_reg_var(s, addr, rn);
9ee6e8bb 7288 if (insn & (1 << 20)) {
86753403
PB
7289 switch (op1) {
7290 case 0: /* ldrex */
426f5abc 7291 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7292 break;
7293 case 1: /* ldrexd */
426f5abc 7294 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7295 break;
7296 case 2: /* ldrexb */
426f5abc 7297 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7298 break;
7299 case 3: /* ldrexh */
426f5abc 7300 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7301 break;
7302 default:
7303 abort();
7304 }
9ee6e8bb
PB
7305 } else {
7306 rm = insn & 0xf;
86753403
PB
7307 switch (op1) {
7308 case 0: /* strex */
426f5abc 7309 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7310 break;
7311 case 1: /* strexd */
502e64fe 7312 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7313 break;
7314 case 2: /* strexb */
426f5abc 7315 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7316 break;
7317 case 3: /* strexh */
426f5abc 7318 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7319 break;
7320 default:
7321 abort();
7322 }
9ee6e8bb 7323 }
3174f8e9 7324 tcg_temp_free(addr);
9ee6e8bb
PB
7325 } else {
7326 /* SWP instruction */
7327 rm = (insn) & 0xf;
7328
8984bd2e
PB
7329 /* ??? This is not really atomic. However we know
7330 we never have multiple CPUs running in parallel,
7331 so it is good enough. */
7332 addr = load_reg(s, rn);
7333 tmp = load_reg(s, rm);
9ee6e8bb 7334 if (insn & (1 << 22)) {
8984bd2e
PB
7335 tmp2 = gen_ld8u(addr, IS_USER(s));
7336 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7337 } else {
8984bd2e
PB
7338 tmp2 = gen_ld32(addr, IS_USER(s));
7339 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7340 }
7d1b0095 7341 tcg_temp_free_i32(addr);
8984bd2e 7342 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7343 }
7344 }
7345 } else {
7346 int address_offset;
7347 int load;
7348 /* Misc load/store */
7349 rn = (insn >> 16) & 0xf;
7350 rd = (insn >> 12) & 0xf;
b0109805 7351 addr = load_reg(s, rn);
9ee6e8bb 7352 if (insn & (1 << 24))
b0109805 7353 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7354 address_offset = 0;
7355 if (insn & (1 << 20)) {
7356 /* load */
7357 switch(sh) {
7358 case 1:
b0109805 7359 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7360 break;
7361 case 2:
b0109805 7362 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7363 break;
7364 default:
7365 case 3:
b0109805 7366 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7367 break;
7368 }
7369 load = 1;
7370 } else if (sh & 2) {
be5e7a76 7371 ARCH(5TE);
9ee6e8bb
PB
7372 /* doubleword */
7373 if (sh & 1) {
7374 /* store */
b0109805
PB
7375 tmp = load_reg(s, rd);
7376 gen_st32(tmp, addr, IS_USER(s));
7377 tcg_gen_addi_i32(addr, addr, 4);
7378 tmp = load_reg(s, rd + 1);
7379 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7380 load = 0;
7381 } else {
7382 /* load */
b0109805
PB
7383 tmp = gen_ld32(addr, IS_USER(s));
7384 store_reg(s, rd, tmp);
7385 tcg_gen_addi_i32(addr, addr, 4);
7386 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7387 rd++;
7388 load = 1;
7389 }
7390 address_offset = -4;
7391 } else {
7392 /* store */
b0109805
PB
7393 tmp = load_reg(s, rd);
7394 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7395 load = 0;
7396 }
7397 /* Perform base writeback before the loaded value to
7398 ensure correct behavior with overlapping index registers.
7399 ldrd with base writeback is is undefined if the
7400 destination and index registers overlap. */
7401 if (!(insn & (1 << 24))) {
b0109805
PB
7402 gen_add_datah_offset(s, insn, address_offset, addr);
7403 store_reg(s, rn, addr);
9ee6e8bb
PB
7404 } else if (insn & (1 << 21)) {
7405 if (address_offset)
b0109805
PB
7406 tcg_gen_addi_i32(addr, addr, address_offset);
7407 store_reg(s, rn, addr);
7408 } else {
7d1b0095 7409 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7410 }
7411 if (load) {
7412 /* Complete the load. */
b0109805 7413 store_reg(s, rd, tmp);
9ee6e8bb
PB
7414 }
7415 }
7416 break;
7417 case 0x4:
7418 case 0x5:
7419 goto do_ldst;
7420 case 0x6:
7421 case 0x7:
7422 if (insn & (1 << 4)) {
7423 ARCH(6);
7424 /* Armv6 Media instructions. */
7425 rm = insn & 0xf;
7426 rn = (insn >> 16) & 0xf;
2c0262af 7427 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7428 rs = (insn >> 8) & 0xf;
7429 switch ((insn >> 23) & 3) {
7430 case 0: /* Parallel add/subtract. */
7431 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7432 tmp = load_reg(s, rn);
7433 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7434 sh = (insn >> 5) & 7;
7435 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7436 goto illegal_op;
6ddbc6e4 7437 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7438 tcg_temp_free_i32(tmp2);
6ddbc6e4 7439 store_reg(s, rd, tmp);
9ee6e8bb
PB
7440 break;
7441 case 1:
7442 if ((insn & 0x00700020) == 0) {
6c95676b 7443 /* Halfword pack. */
3670669c
PB
7444 tmp = load_reg(s, rn);
7445 tmp2 = load_reg(s, rm);
9ee6e8bb 7446 shift = (insn >> 7) & 0x1f;
3670669c
PB
7447 if (insn & (1 << 6)) {
7448 /* pkhtb */
22478e79
AZ
7449 if (shift == 0)
7450 shift = 31;
7451 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7452 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7453 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7454 } else {
7455 /* pkhbt */
22478e79
AZ
7456 if (shift)
7457 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7458 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7459 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7460 }
7461 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7462 tcg_temp_free_i32(tmp2);
3670669c 7463 store_reg(s, rd, tmp);
9ee6e8bb
PB
7464 } else if ((insn & 0x00200020) == 0x00200000) {
7465 /* [us]sat */
6ddbc6e4 7466 tmp = load_reg(s, rm);
9ee6e8bb
PB
7467 shift = (insn >> 7) & 0x1f;
7468 if (insn & (1 << 6)) {
7469 if (shift == 0)
7470 shift = 31;
6ddbc6e4 7471 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7472 } else {
6ddbc6e4 7473 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7474 }
7475 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7476 tmp2 = tcg_const_i32(sh);
7477 if (insn & (1 << 22))
9ef39277 7478 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7479 else
9ef39277 7480 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7481 tcg_temp_free_i32(tmp2);
6ddbc6e4 7482 store_reg(s, rd, tmp);
9ee6e8bb
PB
7483 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7484 /* [us]sat16 */
6ddbc6e4 7485 tmp = load_reg(s, rm);
9ee6e8bb 7486 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7487 tmp2 = tcg_const_i32(sh);
7488 if (insn & (1 << 22))
9ef39277 7489 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7490 else
9ef39277 7491 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7492 tcg_temp_free_i32(tmp2);
6ddbc6e4 7493 store_reg(s, rd, tmp);
9ee6e8bb
PB
7494 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7495 /* Select bytes. */
6ddbc6e4
PB
7496 tmp = load_reg(s, rn);
7497 tmp2 = load_reg(s, rm);
7d1b0095 7498 tmp3 = tcg_temp_new_i32();
0ecb72a5 7499 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7500 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7501 tcg_temp_free_i32(tmp3);
7502 tcg_temp_free_i32(tmp2);
6ddbc6e4 7503 store_reg(s, rd, tmp);
9ee6e8bb 7504 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7505 tmp = load_reg(s, rm);
9ee6e8bb 7506 shift = (insn >> 10) & 3;
1301f322 7507 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7508 rotate, a shift is sufficient. */
7509 if (shift != 0)
f669df27 7510 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7511 op1 = (insn >> 20) & 7;
7512 switch (op1) {
5e3f878a
PB
7513 case 0: gen_sxtb16(tmp); break;
7514 case 2: gen_sxtb(tmp); break;
7515 case 3: gen_sxth(tmp); break;
7516 case 4: gen_uxtb16(tmp); break;
7517 case 6: gen_uxtb(tmp); break;
7518 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7519 default: goto illegal_op;
7520 }
7521 if (rn != 15) {
5e3f878a 7522 tmp2 = load_reg(s, rn);
9ee6e8bb 7523 if ((op1 & 3) == 0) {
5e3f878a 7524 gen_add16(tmp, tmp2);
9ee6e8bb 7525 } else {
5e3f878a 7526 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7527 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7528 }
7529 }
6c95676b 7530 store_reg(s, rd, tmp);
9ee6e8bb
PB
7531 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7532 /* rev */
b0109805 7533 tmp = load_reg(s, rm);
9ee6e8bb
PB
7534 if (insn & (1 << 22)) {
7535 if (insn & (1 << 7)) {
b0109805 7536 gen_revsh(tmp);
9ee6e8bb
PB
7537 } else {
7538 ARCH(6T2);
b0109805 7539 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7540 }
7541 } else {
7542 if (insn & (1 << 7))
b0109805 7543 gen_rev16(tmp);
9ee6e8bb 7544 else
66896cb8 7545 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7546 }
b0109805 7547 store_reg(s, rd, tmp);
9ee6e8bb
PB
7548 } else {
7549 goto illegal_op;
7550 }
7551 break;
7552 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7553 switch ((insn >> 20) & 0x7) {
7554 case 5:
7555 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7556 /* op2 not 00x or 11x : UNDEF */
7557 goto illegal_op;
7558 }
838fa72d
AJ
7559 /* Signed multiply most significant [accumulate].
7560 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7561 tmp = load_reg(s, rm);
7562 tmp2 = load_reg(s, rs);
a7812ae4 7563 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7564
955a7dd5 7565 if (rd != 15) {
838fa72d 7566 tmp = load_reg(s, rd);
9ee6e8bb 7567 if (insn & (1 << 6)) {
838fa72d 7568 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7569 } else {
838fa72d 7570 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7571 }
7572 }
838fa72d
AJ
7573 if (insn & (1 << 5)) {
7574 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7575 }
7576 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7577 tmp = tcg_temp_new_i32();
838fa72d
AJ
7578 tcg_gen_trunc_i64_i32(tmp, tmp64);
7579 tcg_temp_free_i64(tmp64);
955a7dd5 7580 store_reg(s, rn, tmp);
41e9564d
PM
7581 break;
7582 case 0:
7583 case 4:
7584 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7585 if (insn & (1 << 7)) {
7586 goto illegal_op;
7587 }
7588 tmp = load_reg(s, rm);
7589 tmp2 = load_reg(s, rs);
9ee6e8bb 7590 if (insn & (1 << 5))
5e3f878a
PB
7591 gen_swap_half(tmp2);
7592 gen_smul_dual(tmp, tmp2);
5e3f878a 7593 if (insn & (1 << 6)) {
e1d177b9 7594 /* This subtraction cannot overflow. */
5e3f878a
PB
7595 tcg_gen_sub_i32(tmp, tmp, tmp2);
7596 } else {
e1d177b9
PM
7597 /* This addition cannot overflow 32 bits;
7598 * however it may overflow considered as a signed
7599 * operation, in which case we must set the Q flag.
7600 */
9ef39277 7601 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7602 }
7d1b0095 7603 tcg_temp_free_i32(tmp2);
9ee6e8bb 7604 if (insn & (1 << 22)) {
5e3f878a 7605 /* smlald, smlsld */
a7812ae4
PB
7606 tmp64 = tcg_temp_new_i64();
7607 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7608 tcg_temp_free_i32(tmp);
a7812ae4
PB
7609 gen_addq(s, tmp64, rd, rn);
7610 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7611 tcg_temp_free_i64(tmp64);
9ee6e8bb 7612 } else {
5e3f878a 7613 /* smuad, smusd, smlad, smlsd */
22478e79 7614 if (rd != 15)
9ee6e8bb 7615 {
22478e79 7616 tmp2 = load_reg(s, rd);
9ef39277 7617 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7618 tcg_temp_free_i32(tmp2);
9ee6e8bb 7619 }
22478e79 7620 store_reg(s, rn, tmp);
9ee6e8bb 7621 }
41e9564d 7622 break;
b8b8ea05
PM
7623 case 1:
7624 case 3:
7625 /* SDIV, UDIV */
7626 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7627 goto illegal_op;
7628 }
7629 if (((insn >> 5) & 7) || (rd != 15)) {
7630 goto illegal_op;
7631 }
7632 tmp = load_reg(s, rm);
7633 tmp2 = load_reg(s, rs);
7634 if (insn & (1 << 21)) {
7635 gen_helper_udiv(tmp, tmp, tmp2);
7636 } else {
7637 gen_helper_sdiv(tmp, tmp, tmp2);
7638 }
7639 tcg_temp_free_i32(tmp2);
7640 store_reg(s, rn, tmp);
7641 break;
41e9564d
PM
7642 default:
7643 goto illegal_op;
9ee6e8bb
PB
7644 }
7645 break;
7646 case 3:
7647 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7648 switch (op1) {
7649 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7650 ARCH(6);
7651 tmp = load_reg(s, rm);
7652 tmp2 = load_reg(s, rs);
7653 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7654 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7655 if (rd != 15) {
7656 tmp2 = load_reg(s, rd);
6ddbc6e4 7657 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7658 tcg_temp_free_i32(tmp2);
9ee6e8bb 7659 }
ded9d295 7660 store_reg(s, rn, tmp);
9ee6e8bb
PB
7661 break;
7662 case 0x20: case 0x24: case 0x28: case 0x2c:
7663 /* Bitfield insert/clear. */
7664 ARCH(6T2);
7665 shift = (insn >> 7) & 0x1f;
7666 i = (insn >> 16) & 0x1f;
7667 i = i + 1 - shift;
7668 if (rm == 15) {
7d1b0095 7669 tmp = tcg_temp_new_i32();
5e3f878a 7670 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7671 } else {
5e3f878a 7672 tmp = load_reg(s, rm);
9ee6e8bb
PB
7673 }
7674 if (i != 32) {
5e3f878a 7675 tmp2 = load_reg(s, rd);
d593c48e 7676 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7677 tcg_temp_free_i32(tmp2);
9ee6e8bb 7678 }
5e3f878a 7679 store_reg(s, rd, tmp);
9ee6e8bb
PB
7680 break;
7681 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7682 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7683 ARCH(6T2);
5e3f878a 7684 tmp = load_reg(s, rm);
9ee6e8bb
PB
7685 shift = (insn >> 7) & 0x1f;
7686 i = ((insn >> 16) & 0x1f) + 1;
7687 if (shift + i > 32)
7688 goto illegal_op;
7689 if (i < 32) {
7690 if (op1 & 0x20) {
5e3f878a 7691 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7692 } else {
5e3f878a 7693 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7694 }
7695 }
5e3f878a 7696 store_reg(s, rd, tmp);
9ee6e8bb
PB
7697 break;
7698 default:
7699 goto illegal_op;
7700 }
7701 break;
7702 }
7703 break;
7704 }
7705 do_ldst:
7706 /* Check for undefined extension instructions
7707 * per the ARM Bible IE:
7708 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7709 */
7710 sh = (0xf << 20) | (0xf << 4);
7711 if (op1 == 0x7 && ((insn & sh) == sh))
7712 {
7713 goto illegal_op;
7714 }
7715 /* load/store byte/word */
7716 rn = (insn >> 16) & 0xf;
7717 rd = (insn >> 12) & 0xf;
b0109805 7718 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7719 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7720 if (insn & (1 << 24))
b0109805 7721 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7722 if (insn & (1 << 20)) {
7723 /* load */
9ee6e8bb 7724 if (insn & (1 << 22)) {
b0109805 7725 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7726 } else {
b0109805 7727 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7728 }
9ee6e8bb
PB
7729 } else {
7730 /* store */
b0109805 7731 tmp = load_reg(s, rd);
9ee6e8bb 7732 if (insn & (1 << 22))
b0109805 7733 gen_st8(tmp, tmp2, i);
9ee6e8bb 7734 else
b0109805 7735 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7736 }
7737 if (!(insn & (1 << 24))) {
b0109805
PB
7738 gen_add_data_offset(s, insn, tmp2);
7739 store_reg(s, rn, tmp2);
7740 } else if (insn & (1 << 21)) {
7741 store_reg(s, rn, tmp2);
7742 } else {
7d1b0095 7743 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7744 }
7745 if (insn & (1 << 20)) {
7746 /* Complete the load. */
be5e7a76 7747 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7748 }
7749 break;
7750 case 0x08:
7751 case 0x09:
7752 {
7753 int j, n, user, loaded_base;
b0109805 7754 TCGv loaded_var;
9ee6e8bb
PB
7755 /* load/store multiple words */
7756 /* XXX: store correct base if write back */
7757 user = 0;
7758 if (insn & (1 << 22)) {
7759 if (IS_USER(s))
7760 goto illegal_op; /* only usable in supervisor mode */
7761
7762 if ((insn & (1 << 15)) == 0)
7763 user = 1;
7764 }
7765 rn = (insn >> 16) & 0xf;
b0109805 7766 addr = load_reg(s, rn);
9ee6e8bb
PB
7767
7768 /* compute total size */
7769 loaded_base = 0;
a50f5b91 7770 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7771 n = 0;
7772 for(i=0;i<16;i++) {
7773 if (insn & (1 << i))
7774 n++;
7775 }
7776 /* XXX: test invalid n == 0 case ? */
7777 if (insn & (1 << 23)) {
7778 if (insn & (1 << 24)) {
7779 /* pre increment */
b0109805 7780 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7781 } else {
7782 /* post increment */
7783 }
7784 } else {
7785 if (insn & (1 << 24)) {
7786 /* pre decrement */
b0109805 7787 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7788 } else {
7789 /* post decrement */
7790 if (n != 1)
b0109805 7791 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7792 }
7793 }
7794 j = 0;
7795 for(i=0;i<16;i++) {
7796 if (insn & (1 << i)) {
7797 if (insn & (1 << 20)) {
7798 /* load */
b0109805 7799 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7800 if (user) {
b75263d6 7801 tmp2 = tcg_const_i32(i);
1ce94f81 7802 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7803 tcg_temp_free_i32(tmp2);
7d1b0095 7804 tcg_temp_free_i32(tmp);
9ee6e8bb 7805 } else if (i == rn) {
b0109805 7806 loaded_var = tmp;
9ee6e8bb
PB
7807 loaded_base = 1;
7808 } else {
be5e7a76 7809 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7810 }
7811 } else {
7812 /* store */
7813 if (i == 15) {
7814 /* special case: r15 = PC + 8 */
7815 val = (long)s->pc + 4;
7d1b0095 7816 tmp = tcg_temp_new_i32();
b0109805 7817 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7818 } else if (user) {
7d1b0095 7819 tmp = tcg_temp_new_i32();
b75263d6 7820 tmp2 = tcg_const_i32(i);
9ef39277 7821 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7822 tcg_temp_free_i32(tmp2);
9ee6e8bb 7823 } else {
b0109805 7824 tmp = load_reg(s, i);
9ee6e8bb 7825 }
b0109805 7826 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7827 }
7828 j++;
7829 /* no need to add after the last transfer */
7830 if (j != n)
b0109805 7831 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7832 }
7833 }
7834 if (insn & (1 << 21)) {
7835 /* write back */
7836 if (insn & (1 << 23)) {
7837 if (insn & (1 << 24)) {
7838 /* pre increment */
7839 } else {
7840 /* post increment */
b0109805 7841 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7842 }
7843 } else {
7844 if (insn & (1 << 24)) {
7845 /* pre decrement */
7846 if (n != 1)
b0109805 7847 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7848 } else {
7849 /* post decrement */
b0109805 7850 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7851 }
7852 }
b0109805
PB
7853 store_reg(s, rn, addr);
7854 } else {
7d1b0095 7855 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7856 }
7857 if (loaded_base) {
b0109805 7858 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7859 }
7860 if ((insn & (1 << 22)) && !user) {
7861 /* Restore CPSR from SPSR. */
d9ba4830
PB
7862 tmp = load_cpu_field(spsr);
7863 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7864 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7865 s->is_jmp = DISAS_UPDATE;
7866 }
7867 }
7868 break;
7869 case 0xa:
7870 case 0xb:
7871 {
7872 int32_t offset;
7873
7874 /* branch (and link) */
7875 val = (int32_t)s->pc;
7876 if (insn & (1 << 24)) {
7d1b0095 7877 tmp = tcg_temp_new_i32();
5e3f878a
PB
7878 tcg_gen_movi_i32(tmp, val);
7879 store_reg(s, 14, tmp);
9ee6e8bb
PB
7880 }
7881 offset = (((int32_t)insn << 8) >> 8);
7882 val += (offset << 2) + 4;
7883 gen_jmp(s, val);
7884 }
7885 break;
7886 case 0xc:
7887 case 0xd:
7888 case 0xe:
7889 /* Coprocessor. */
7890 if (disas_coproc_insn(env, s, insn))
7891 goto illegal_op;
7892 break;
7893 case 0xf:
7894 /* swi */
5e3f878a 7895 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7896 s->is_jmp = DISAS_SWI;
7897 break;
7898 default:
7899 illegal_op:
bc4a0de0 7900 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7901 break;
7902 }
7903 }
7904}
7905
/* Return true (nonzero) if OP is one of the Thumb-2 logical data-processing
   opcodes (and, bic, orr, orn, eor, ...); these occupy opcode values 0-7.  */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
7912
7913/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7914 then set condition code flags based on the result of the operation.
7915 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7916 to the high bit of T1.
7917 Returns zero if the opcode is valid. */
7918
7919static int
396e467c 7920gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7921{
7922 int logic_cc;
7923
7924 logic_cc = 0;
7925 switch (op) {
7926 case 0: /* and */
396e467c 7927 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7928 logic_cc = conds;
7929 break;
7930 case 1: /* bic */
f669df27 7931 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7932 logic_cc = conds;
7933 break;
7934 case 2: /* orr */
396e467c 7935 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7936 logic_cc = conds;
7937 break;
7938 case 3: /* orn */
29501f1b 7939 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7940 logic_cc = conds;
7941 break;
7942 case 4: /* eor */
396e467c 7943 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7944 logic_cc = conds;
7945 break;
7946 case 8: /* add */
7947 if (conds)
72485ec4 7948 gen_add_CC(t0, t0, t1);
9ee6e8bb 7949 else
396e467c 7950 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7951 break;
7952 case 10: /* adc */
7953 if (conds)
49b4c31e 7954 gen_adc_CC(t0, t0, t1);
9ee6e8bb 7955 else
396e467c 7956 gen_adc(t0, t1);
9ee6e8bb
PB
7957 break;
7958 case 11: /* sbc */
2de68a49
RH
7959 if (conds) {
7960 gen_sbc_CC(t0, t0, t1);
7961 } else {
396e467c 7962 gen_sub_carry(t0, t0, t1);
2de68a49 7963 }
9ee6e8bb
PB
7964 break;
7965 case 13: /* sub */
7966 if (conds)
72485ec4 7967 gen_sub_CC(t0, t0, t1);
9ee6e8bb 7968 else
396e467c 7969 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7970 break;
7971 case 14: /* rsb */
7972 if (conds)
72485ec4 7973 gen_sub_CC(t0, t1, t0);
9ee6e8bb 7974 else
396e467c 7975 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7976 break;
7977 default: /* 5, 6, 7, 9, 12, 15. */
7978 return 1;
7979 }
7980 if (logic_cc) {
396e467c 7981 gen_logic_CC(t0);
9ee6e8bb 7982 if (shifter_out)
396e467c 7983 gen_set_CF_bit31(t1);
9ee6e8bb
PB
7984 }
7985 return 0;
7986}
7987
7988/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7989 is not legal. */
0ecb72a5 7990static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 7991{
b0109805 7992 uint32_t insn, imm, shift, offset;
9ee6e8bb 7993 uint32_t rd, rn, rm, rs;
b26eefb6 7994 TCGv tmp;
6ddbc6e4
PB
7995 TCGv tmp2;
7996 TCGv tmp3;
b0109805 7997 TCGv addr;
a7812ae4 7998 TCGv_i64 tmp64;
9ee6e8bb
PB
7999 int op;
8000 int shiftop;
8001 int conds;
8002 int logic_cc;
8003
8004 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8005 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8006 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8007 16-bit instructions to get correct prefetch abort behavior. */
8008 insn = insn_hw1;
8009 if ((insn & (1 << 12)) == 0) {
be5e7a76 8010 ARCH(5);
9ee6e8bb
PB
8011 /* Second half of blx. */
8012 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8013 tmp = load_reg(s, 14);
8014 tcg_gen_addi_i32(tmp, tmp, offset);
8015 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8016
7d1b0095 8017 tmp2 = tcg_temp_new_i32();
b0109805 8018 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8019 store_reg(s, 14, tmp2);
8020 gen_bx(s, tmp);
9ee6e8bb
PB
8021 return 0;
8022 }
8023 if (insn & (1 << 11)) {
8024 /* Second half of bl. */
8025 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8026 tmp = load_reg(s, 14);
6a0d8a1d 8027 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8028
7d1b0095 8029 tmp2 = tcg_temp_new_i32();
b0109805 8030 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8031 store_reg(s, 14, tmp2);
8032 gen_bx(s, tmp);
9ee6e8bb
PB
8033 return 0;
8034 }
8035 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8036 /* Instruction spans a page boundary. Implement it as two
8037 16-bit instructions in case the second half causes an
8038 prefetch abort. */
8039 offset = ((int32_t)insn << 21) >> 9;
396e467c 8040 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8041 return 0;
8042 }
8043 /* Fall through to 32-bit decode. */
8044 }
8045
d31dd73e 8046 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8047 s->pc += 2;
8048 insn |= (uint32_t)insn_hw1 << 16;
8049
8050 if ((insn & 0xf800e800) != 0xf000e800) {
8051 ARCH(6T2);
8052 }
8053
8054 rn = (insn >> 16) & 0xf;
8055 rs = (insn >> 12) & 0xf;
8056 rd = (insn >> 8) & 0xf;
8057 rm = insn & 0xf;
8058 switch ((insn >> 25) & 0xf) {
8059 case 0: case 1: case 2: case 3:
8060 /* 16-bit instructions. Should never happen. */
8061 abort();
8062 case 4:
8063 if (insn & (1 << 22)) {
8064 /* Other load/store, table branch. */
8065 if (insn & 0x01200000) {
8066 /* Load/store doubleword. */
8067 if (rn == 15) {
7d1b0095 8068 addr = tcg_temp_new_i32();
b0109805 8069 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8070 } else {
b0109805 8071 addr = load_reg(s, rn);
9ee6e8bb
PB
8072 }
8073 offset = (insn & 0xff) * 4;
8074 if ((insn & (1 << 23)) == 0)
8075 offset = -offset;
8076 if (insn & (1 << 24)) {
b0109805 8077 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8078 offset = 0;
8079 }
8080 if (insn & (1 << 20)) {
8081 /* ldrd */
b0109805
PB
8082 tmp = gen_ld32(addr, IS_USER(s));
8083 store_reg(s, rs, tmp);
8084 tcg_gen_addi_i32(addr, addr, 4);
8085 tmp = gen_ld32(addr, IS_USER(s));
8086 store_reg(s, rd, tmp);
9ee6e8bb
PB
8087 } else {
8088 /* strd */
b0109805
PB
8089 tmp = load_reg(s, rs);
8090 gen_st32(tmp, addr, IS_USER(s));
8091 tcg_gen_addi_i32(addr, addr, 4);
8092 tmp = load_reg(s, rd);
8093 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8094 }
8095 if (insn & (1 << 21)) {
8096 /* Base writeback. */
8097 if (rn == 15)
8098 goto illegal_op;
b0109805
PB
8099 tcg_gen_addi_i32(addr, addr, offset - 4);
8100 store_reg(s, rn, addr);
8101 } else {
7d1b0095 8102 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8103 }
8104 } else if ((insn & (1 << 23)) == 0) {
8105 /* Load/store exclusive word. */
3174f8e9 8106 addr = tcg_temp_local_new();
98a46317 8107 load_reg_var(s, addr, rn);
426f5abc 8108 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8109 if (insn & (1 << 20)) {
426f5abc 8110 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8111 } else {
426f5abc 8112 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8113 }
3174f8e9 8114 tcg_temp_free(addr);
9ee6e8bb
PB
8115 } else if ((insn & (1 << 6)) == 0) {
8116 /* Table Branch. */
8117 if (rn == 15) {
7d1b0095 8118 addr = tcg_temp_new_i32();
b0109805 8119 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8120 } else {
b0109805 8121 addr = load_reg(s, rn);
9ee6e8bb 8122 }
b26eefb6 8123 tmp = load_reg(s, rm);
b0109805 8124 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8125 if (insn & (1 << 4)) {
8126 /* tbh */
b0109805 8127 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8128 tcg_temp_free_i32(tmp);
b0109805 8129 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8130 } else { /* tbb */
7d1b0095 8131 tcg_temp_free_i32(tmp);
b0109805 8132 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8133 }
7d1b0095 8134 tcg_temp_free_i32(addr);
b0109805
PB
8135 tcg_gen_shli_i32(tmp, tmp, 1);
8136 tcg_gen_addi_i32(tmp, tmp, s->pc);
8137 store_reg(s, 15, tmp);
9ee6e8bb
PB
8138 } else {
8139 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8140 ARCH(7);
9ee6e8bb 8141 op = (insn >> 4) & 0x3;
426f5abc
PB
8142 if (op == 2) {
8143 goto illegal_op;
8144 }
3174f8e9 8145 addr = tcg_temp_local_new();
98a46317 8146 load_reg_var(s, addr, rn);
9ee6e8bb 8147 if (insn & (1 << 20)) {
426f5abc 8148 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8149 } else {
426f5abc 8150 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8151 }
3174f8e9 8152 tcg_temp_free(addr);
9ee6e8bb
PB
8153 }
8154 } else {
8155 /* Load/store multiple, RFE, SRS. */
8156 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8157 /* Not available in user mode. */
b0109805 8158 if (IS_USER(s))
9ee6e8bb
PB
8159 goto illegal_op;
8160 if (insn & (1 << 20)) {
8161 /* rfe */
b0109805
PB
8162 addr = load_reg(s, rn);
8163 if ((insn & (1 << 24)) == 0)
8164 tcg_gen_addi_i32(addr, addr, -8);
8165 /* Load PC into tmp and CPSR into tmp2. */
8166 tmp = gen_ld32(addr, 0);
8167 tcg_gen_addi_i32(addr, addr, 4);
8168 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8169 if (insn & (1 << 21)) {
8170 /* Base writeback. */
b0109805
PB
8171 if (insn & (1 << 24)) {
8172 tcg_gen_addi_i32(addr, addr, 4);
8173 } else {
8174 tcg_gen_addi_i32(addr, addr, -4);
8175 }
8176 store_reg(s, rn, addr);
8177 } else {
7d1b0095 8178 tcg_temp_free_i32(addr);
9ee6e8bb 8179 }
b0109805 8180 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8181 } else {
8182 /* srs */
8183 op = (insn & 0x1f);
7d1b0095 8184 addr = tcg_temp_new_i32();
39ea3d4e
PM
8185 tmp = tcg_const_i32(op);
8186 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8187 tcg_temp_free_i32(tmp);
9ee6e8bb 8188 if ((insn & (1 << 24)) == 0) {
b0109805 8189 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8190 }
b0109805
PB
8191 tmp = load_reg(s, 14);
8192 gen_st32(tmp, addr, 0);
8193 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8194 tmp = tcg_temp_new_i32();
9ef39277 8195 gen_helper_cpsr_read(tmp, cpu_env);
b0109805 8196 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8197 if (insn & (1 << 21)) {
8198 if ((insn & (1 << 24)) == 0) {
b0109805 8199 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8200 } else {
b0109805 8201 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8202 }
39ea3d4e
PM
8203 tmp = tcg_const_i32(op);
8204 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8205 tcg_temp_free_i32(tmp);
b0109805 8206 } else {
7d1b0095 8207 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8208 }
8209 }
8210 } else {
5856d44e
YO
8211 int i, loaded_base = 0;
8212 TCGv loaded_var;
9ee6e8bb 8213 /* Load/store multiple. */
b0109805 8214 addr = load_reg(s, rn);
9ee6e8bb
PB
8215 offset = 0;
8216 for (i = 0; i < 16; i++) {
8217 if (insn & (1 << i))
8218 offset += 4;
8219 }
8220 if (insn & (1 << 24)) {
b0109805 8221 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8222 }
8223
5856d44e 8224 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8225 for (i = 0; i < 16; i++) {
8226 if ((insn & (1 << i)) == 0)
8227 continue;
8228 if (insn & (1 << 20)) {
8229 /* Load. */
b0109805 8230 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8231 if (i == 15) {
b0109805 8232 gen_bx(s, tmp);
5856d44e
YO
8233 } else if (i == rn) {
8234 loaded_var = tmp;
8235 loaded_base = 1;
9ee6e8bb 8236 } else {
b0109805 8237 store_reg(s, i, tmp);
9ee6e8bb
PB
8238 }
8239 } else {
8240 /* Store. */
b0109805
PB
8241 tmp = load_reg(s, i);
8242 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8243 }
b0109805 8244 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8245 }
5856d44e
YO
8246 if (loaded_base) {
8247 store_reg(s, rn, loaded_var);
8248 }
9ee6e8bb
PB
8249 if (insn & (1 << 21)) {
8250 /* Base register writeback. */
8251 if (insn & (1 << 24)) {
b0109805 8252 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8253 }
8254 /* Fault if writeback register is in register list. */
8255 if (insn & (1 << rn))
8256 goto illegal_op;
b0109805
PB
8257 store_reg(s, rn, addr);
8258 } else {
7d1b0095 8259 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8260 }
8261 }
8262 }
8263 break;
2af9ab77
JB
8264 case 5:
8265
9ee6e8bb 8266 op = (insn >> 21) & 0xf;
2af9ab77
JB
8267 if (op == 6) {
8268 /* Halfword pack. */
8269 tmp = load_reg(s, rn);
8270 tmp2 = load_reg(s, rm);
8271 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8272 if (insn & (1 << 5)) {
8273 /* pkhtb */
8274 if (shift == 0)
8275 shift = 31;
8276 tcg_gen_sari_i32(tmp2, tmp2, shift);
8277 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8278 tcg_gen_ext16u_i32(tmp2, tmp2);
8279 } else {
8280 /* pkhbt */
8281 if (shift)
8282 tcg_gen_shli_i32(tmp2, tmp2, shift);
8283 tcg_gen_ext16u_i32(tmp, tmp);
8284 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8285 }
8286 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8287 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8288 store_reg(s, rd, tmp);
8289 } else {
2af9ab77
JB
8290 /* Data processing register constant shift. */
8291 if (rn == 15) {
7d1b0095 8292 tmp = tcg_temp_new_i32();
2af9ab77
JB
8293 tcg_gen_movi_i32(tmp, 0);
8294 } else {
8295 tmp = load_reg(s, rn);
8296 }
8297 tmp2 = load_reg(s, rm);
8298
8299 shiftop = (insn >> 4) & 3;
8300 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8301 conds = (insn & (1 << 20)) != 0;
8302 logic_cc = (conds && thumb2_logic_op(op));
8303 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8304 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8305 goto illegal_op;
7d1b0095 8306 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8307 if (rd != 15) {
8308 store_reg(s, rd, tmp);
8309 } else {
7d1b0095 8310 tcg_temp_free_i32(tmp);
2af9ab77 8311 }
3174f8e9 8312 }
9ee6e8bb
PB
8313 break;
8314 case 13: /* Misc data processing. */
8315 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8316 if (op < 4 && (insn & 0xf000) != 0xf000)
8317 goto illegal_op;
8318 switch (op) {
8319 case 0: /* Register controlled shift. */
8984bd2e
PB
8320 tmp = load_reg(s, rn);
8321 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8322 if ((insn & 0x70) != 0)
8323 goto illegal_op;
8324 op = (insn >> 21) & 3;
8984bd2e
PB
8325 logic_cc = (insn & (1 << 20)) != 0;
8326 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8327 if (logic_cc)
8328 gen_logic_CC(tmp);
21aeb343 8329 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8330 break;
8331 case 1: /* Sign/zero extend. */
5e3f878a 8332 tmp = load_reg(s, rm);
9ee6e8bb 8333 shift = (insn >> 4) & 3;
1301f322 8334 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8335 rotate, a shift is sufficient. */
8336 if (shift != 0)
f669df27 8337 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8338 op = (insn >> 20) & 7;
8339 switch (op) {
5e3f878a
PB
8340 case 0: gen_sxth(tmp); break;
8341 case 1: gen_uxth(tmp); break;
8342 case 2: gen_sxtb16(tmp); break;
8343 case 3: gen_uxtb16(tmp); break;
8344 case 4: gen_sxtb(tmp); break;
8345 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8346 default: goto illegal_op;
8347 }
8348 if (rn != 15) {
5e3f878a 8349 tmp2 = load_reg(s, rn);
9ee6e8bb 8350 if ((op >> 1) == 1) {
5e3f878a 8351 gen_add16(tmp, tmp2);
9ee6e8bb 8352 } else {
5e3f878a 8353 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8354 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8355 }
8356 }
5e3f878a 8357 store_reg(s, rd, tmp);
9ee6e8bb
PB
8358 break;
8359 case 2: /* SIMD add/subtract. */
8360 op = (insn >> 20) & 7;
8361 shift = (insn >> 4) & 7;
8362 if ((op & 3) == 3 || (shift & 3) == 3)
8363 goto illegal_op;
6ddbc6e4
PB
8364 tmp = load_reg(s, rn);
8365 tmp2 = load_reg(s, rm);
8366 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8367 tcg_temp_free_i32(tmp2);
6ddbc6e4 8368 store_reg(s, rd, tmp);
9ee6e8bb
PB
8369 break;
8370 case 3: /* Other data processing. */
8371 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8372 if (op < 4) {
8373 /* Saturating add/subtract. */
d9ba4830
PB
8374 tmp = load_reg(s, rn);
8375 tmp2 = load_reg(s, rm);
9ee6e8bb 8376 if (op & 1)
9ef39277 8377 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8378 if (op & 2)
9ef39277 8379 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8380 else
9ef39277 8381 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8382 tcg_temp_free_i32(tmp2);
9ee6e8bb 8383 } else {
d9ba4830 8384 tmp = load_reg(s, rn);
9ee6e8bb
PB
8385 switch (op) {
8386 case 0x0a: /* rbit */
d9ba4830 8387 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8388 break;
8389 case 0x08: /* rev */
66896cb8 8390 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8391 break;
8392 case 0x09: /* rev16 */
d9ba4830 8393 gen_rev16(tmp);
9ee6e8bb
PB
8394 break;
8395 case 0x0b: /* revsh */
d9ba4830 8396 gen_revsh(tmp);
9ee6e8bb
PB
8397 break;
8398 case 0x10: /* sel */
d9ba4830 8399 tmp2 = load_reg(s, rm);
7d1b0095 8400 tmp3 = tcg_temp_new_i32();
0ecb72a5 8401 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8402 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8403 tcg_temp_free_i32(tmp3);
8404 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8405 break;
8406 case 0x18: /* clz */
d9ba4830 8407 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8408 break;
8409 default:
8410 goto illegal_op;
8411 }
8412 }
d9ba4830 8413 store_reg(s, rd, tmp);
9ee6e8bb
PB
8414 break;
8415 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8416 op = (insn >> 4) & 0xf;
d9ba4830
PB
8417 tmp = load_reg(s, rn);
8418 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8419 switch ((insn >> 20) & 7) {
8420 case 0: /* 32 x 32 -> 32 */
d9ba4830 8421 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8422 tcg_temp_free_i32(tmp2);
9ee6e8bb 8423 if (rs != 15) {
d9ba4830 8424 tmp2 = load_reg(s, rs);
9ee6e8bb 8425 if (op)
d9ba4830 8426 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8427 else
d9ba4830 8428 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8429 tcg_temp_free_i32(tmp2);
9ee6e8bb 8430 }
9ee6e8bb
PB
8431 break;
8432 case 1: /* 16 x 16 -> 32 */
d9ba4830 8433 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8434 tcg_temp_free_i32(tmp2);
9ee6e8bb 8435 if (rs != 15) {
d9ba4830 8436 tmp2 = load_reg(s, rs);
9ef39277 8437 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8438 tcg_temp_free_i32(tmp2);
9ee6e8bb 8439 }
9ee6e8bb
PB
8440 break;
8441 case 2: /* Dual multiply add. */
8442 case 4: /* Dual multiply subtract. */
8443 if (op)
d9ba4830
PB
8444 gen_swap_half(tmp2);
8445 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8446 if (insn & (1 << 22)) {
e1d177b9 8447 /* This subtraction cannot overflow. */
d9ba4830 8448 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8449 } else {
e1d177b9
PM
8450 /* This addition cannot overflow 32 bits;
8451 * however it may overflow considered as a signed
8452 * operation, in which case we must set the Q flag.
8453 */
9ef39277 8454 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8455 }
7d1b0095 8456 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8457 if (rs != 15)
8458 {
d9ba4830 8459 tmp2 = load_reg(s, rs);
9ef39277 8460 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8461 tcg_temp_free_i32(tmp2);
9ee6e8bb 8462 }
9ee6e8bb
PB
8463 break;
8464 case 3: /* 32 * 16 -> 32msb */
8465 if (op)
d9ba4830 8466 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8467 else
d9ba4830 8468 gen_sxth(tmp2);
a7812ae4
PB
8469 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8470 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8471 tmp = tcg_temp_new_i32();
a7812ae4 8472 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8473 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8474 if (rs != 15)
8475 {
d9ba4830 8476 tmp2 = load_reg(s, rs);
9ef39277 8477 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8478 tcg_temp_free_i32(tmp2);
9ee6e8bb 8479 }
9ee6e8bb 8480 break;
838fa72d
AJ
8481 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8482 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8483 if (rs != 15) {
838fa72d
AJ
8484 tmp = load_reg(s, rs);
8485 if (insn & (1 << 20)) {
8486 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8487 } else {
838fa72d 8488 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8489 }
2c0262af 8490 }
838fa72d
AJ
8491 if (insn & (1 << 4)) {
8492 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8493 }
8494 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8495 tmp = tcg_temp_new_i32();
838fa72d
AJ
8496 tcg_gen_trunc_i64_i32(tmp, tmp64);
8497 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8498 break;
8499 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8500 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8501 tcg_temp_free_i32(tmp2);
9ee6e8bb 8502 if (rs != 15) {
d9ba4830
PB
8503 tmp2 = load_reg(s, rs);
8504 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8505 tcg_temp_free_i32(tmp2);
5fd46862 8506 }
9ee6e8bb 8507 break;
2c0262af 8508 }
d9ba4830 8509 store_reg(s, rd, tmp);
2c0262af 8510 break;
9ee6e8bb
PB
8511 case 6: case 7: /* 64-bit multiply, Divide. */
8512 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8513 tmp = load_reg(s, rn);
8514 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8515 if ((op & 0x50) == 0x10) {
8516 /* sdiv, udiv */
47789990 8517 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8518 goto illegal_op;
47789990 8519 }
9ee6e8bb 8520 if (op & 0x20)
5e3f878a 8521 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8522 else
5e3f878a 8523 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8524 tcg_temp_free_i32(tmp2);
5e3f878a 8525 store_reg(s, rd, tmp);
9ee6e8bb
PB
8526 } else if ((op & 0xe) == 0xc) {
8527 /* Dual multiply accumulate long. */
8528 if (op & 1)
5e3f878a
PB
8529 gen_swap_half(tmp2);
8530 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8531 if (op & 0x10) {
5e3f878a 8532 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8533 } else {
5e3f878a 8534 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8535 }
7d1b0095 8536 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8537 /* BUGFIX */
8538 tmp64 = tcg_temp_new_i64();
8539 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8540 tcg_temp_free_i32(tmp);
a7812ae4
PB
8541 gen_addq(s, tmp64, rs, rd);
8542 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8543 tcg_temp_free_i64(tmp64);
2c0262af 8544 } else {
9ee6e8bb
PB
8545 if (op & 0x20) {
8546 /* Unsigned 64-bit multiply */
a7812ae4 8547 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8548 } else {
9ee6e8bb
PB
8549 if (op & 8) {
8550 /* smlalxy */
5e3f878a 8551 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8552 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8553 tmp64 = tcg_temp_new_i64();
8554 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8555 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8556 } else {
8557 /* Signed 64-bit multiply */
a7812ae4 8558 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8559 }
b5ff1b31 8560 }
9ee6e8bb
PB
8561 if (op & 4) {
8562 /* umaal */
a7812ae4
PB
8563 gen_addq_lo(s, tmp64, rs);
8564 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8565 } else if (op & 0x40) {
8566 /* 64-bit accumulate. */
a7812ae4 8567 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8568 }
a7812ae4 8569 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8570 tcg_temp_free_i64(tmp64);
5fd46862 8571 }
2c0262af 8572 break;
9ee6e8bb
PB
8573 }
8574 break;
8575 case 6: case 7: case 14: case 15:
8576 /* Coprocessor. */
8577 if (((insn >> 24) & 3) == 3) {
8578 /* Translate into the equivalent ARM encoding. */
f06053e3 8579 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8580 if (disas_neon_data_insn(env, s, insn))
8581 goto illegal_op;
8582 } else {
8583 if (insn & (1 << 28))
8584 goto illegal_op;
8585 if (disas_coproc_insn (env, s, insn))
8586 goto illegal_op;
8587 }
8588 break;
8589 case 8: case 9: case 10: case 11:
8590 if (insn & (1 << 15)) {
8591 /* Branches, misc control. */
8592 if (insn & 0x5000) {
8593 /* Unconditional branch. */
8594 /* signextend(hw1[10:0]) -> offset[:12]. */
8595 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8596 /* hw1[10:0] -> offset[11:1]. */
8597 offset |= (insn & 0x7ff) << 1;
8598 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8599 offset[24:22] already have the same value because of the
8600 sign extension above. */
8601 offset ^= ((~insn) & (1 << 13)) << 10;
8602 offset ^= ((~insn) & (1 << 11)) << 11;
8603
9ee6e8bb
PB
8604 if (insn & (1 << 14)) {
8605 /* Branch and link. */
3174f8e9 8606 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8607 }
3b46e624 8608
b0109805 8609 offset += s->pc;
9ee6e8bb
PB
8610 if (insn & (1 << 12)) {
8611 /* b/bl */
b0109805 8612 gen_jmp(s, offset);
9ee6e8bb
PB
8613 } else {
8614 /* blx */
b0109805 8615 offset &= ~(uint32_t)2;
be5e7a76 8616 /* thumb2 bx, no need to check */
b0109805 8617 gen_bx_im(s, offset);
2c0262af 8618 }
9ee6e8bb
PB
8619 } else if (((insn >> 23) & 7) == 7) {
8620 /* Misc control */
8621 if (insn & (1 << 13))
8622 goto illegal_op;
8623
8624 if (insn & (1 << 26)) {
8625 /* Secure monitor call (v6Z) */
8626 goto illegal_op; /* not implemented. */
2c0262af 8627 } else {
9ee6e8bb
PB
8628 op = (insn >> 20) & 7;
8629 switch (op) {
8630 case 0: /* msr cpsr. */
8631 if (IS_M(env)) {
8984bd2e
PB
8632 tmp = load_reg(s, rn);
8633 addr = tcg_const_i32(insn & 0xff);
8634 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8635 tcg_temp_free_i32(addr);
7d1b0095 8636 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8637 gen_lookup_tb(s);
8638 break;
8639 }
8640 /* fall through */
8641 case 1: /* msr spsr. */
8642 if (IS_M(env))
8643 goto illegal_op;
2fbac54b
FN
8644 tmp = load_reg(s, rn);
8645 if (gen_set_psr(s,
9ee6e8bb 8646 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8647 op == 1, tmp))
9ee6e8bb
PB
8648 goto illegal_op;
8649 break;
8650 case 2: /* cps, nop-hint. */
8651 if (((insn >> 8) & 7) == 0) {
8652 gen_nop_hint(s, insn & 0xff);
8653 }
8654 /* Implemented as NOP in user mode. */
8655 if (IS_USER(s))
8656 break;
8657 offset = 0;
8658 imm = 0;
8659 if (insn & (1 << 10)) {
8660 if (insn & (1 << 7))
8661 offset |= CPSR_A;
8662 if (insn & (1 << 6))
8663 offset |= CPSR_I;
8664 if (insn & (1 << 5))
8665 offset |= CPSR_F;
8666 if (insn & (1 << 9))
8667 imm = CPSR_A | CPSR_I | CPSR_F;
8668 }
8669 if (insn & (1 << 8)) {
8670 offset |= 0x1f;
8671 imm |= (insn & 0x1f);
8672 }
8673 if (offset) {
2fbac54b 8674 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8675 }
8676 break;
8677 case 3: /* Special control operations. */
426f5abc 8678 ARCH(7);
9ee6e8bb
PB
8679 op = (insn >> 4) & 0xf;
8680 switch (op) {
8681 case 2: /* clrex */
426f5abc 8682 gen_clrex(s);
9ee6e8bb
PB
8683 break;
8684 case 4: /* dsb */
8685 case 5: /* dmb */
8686 case 6: /* isb */
8687 /* These execute as NOPs. */
9ee6e8bb
PB
8688 break;
8689 default:
8690 goto illegal_op;
8691 }
8692 break;
8693 case 4: /* bxj */
8694 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8695 tmp = load_reg(s, rn);
8696 gen_bx(s, tmp);
9ee6e8bb
PB
8697 break;
8698 case 5: /* Exception return. */
b8b45b68
RV
8699 if (IS_USER(s)) {
8700 goto illegal_op;
8701 }
8702 if (rn != 14 || rd != 15) {
8703 goto illegal_op;
8704 }
8705 tmp = load_reg(s, rn);
8706 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8707 gen_exception_return(s, tmp);
8708 break;
9ee6e8bb 8709 case 6: /* mrs cpsr. */
7d1b0095 8710 tmp = tcg_temp_new_i32();
9ee6e8bb 8711 if (IS_M(env)) {
8984bd2e
PB
8712 addr = tcg_const_i32(insn & 0xff);
8713 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8714 tcg_temp_free_i32(addr);
9ee6e8bb 8715 } else {
9ef39277 8716 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8717 }
8984bd2e 8718 store_reg(s, rd, tmp);
9ee6e8bb
PB
8719 break;
8720 case 7: /* mrs spsr. */
8721 /* Not accessible in user mode. */
8722 if (IS_USER(s) || IS_M(env))
8723 goto illegal_op;
d9ba4830
PB
8724 tmp = load_cpu_field(spsr);
8725 store_reg(s, rd, tmp);
9ee6e8bb 8726 break;
2c0262af
FB
8727 }
8728 }
9ee6e8bb
PB
8729 } else {
8730 /* Conditional branch. */
8731 op = (insn >> 22) & 0xf;
8732 /* Generate a conditional jump to next instruction. */
8733 s->condlabel = gen_new_label();
d9ba4830 8734 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8735 s->condjmp = 1;
8736
8737 /* offset[11:1] = insn[10:0] */
8738 offset = (insn & 0x7ff) << 1;
8739 /* offset[17:12] = insn[21:16]. */
8740 offset |= (insn & 0x003f0000) >> 4;
8741 /* offset[31:20] = insn[26]. */
8742 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8743 /* offset[18] = insn[13]. */
8744 offset |= (insn & (1 << 13)) << 5;
8745 /* offset[19] = insn[11]. */
8746 offset |= (insn & (1 << 11)) << 8;
8747
8748 /* jump to the offset */
b0109805 8749 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8750 }
8751 } else {
8752 /* Data processing immediate. */
8753 if (insn & (1 << 25)) {
8754 if (insn & (1 << 24)) {
8755 if (insn & (1 << 20))
8756 goto illegal_op;
8757 /* Bitfield/Saturate. */
8758 op = (insn >> 21) & 7;
8759 imm = insn & 0x1f;
8760 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8761 if (rn == 15) {
7d1b0095 8762 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8763 tcg_gen_movi_i32(tmp, 0);
8764 } else {
8765 tmp = load_reg(s, rn);
8766 }
9ee6e8bb
PB
8767 switch (op) {
8768 case 2: /* Signed bitfield extract. */
8769 imm++;
8770 if (shift + imm > 32)
8771 goto illegal_op;
8772 if (imm < 32)
6ddbc6e4 8773 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8774 break;
8775 case 6: /* Unsigned bitfield extract. */
8776 imm++;
8777 if (shift + imm > 32)
8778 goto illegal_op;
8779 if (imm < 32)
6ddbc6e4 8780 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8781 break;
8782 case 3: /* Bitfield insert/clear. */
8783 if (imm < shift)
8784 goto illegal_op;
8785 imm = imm + 1 - shift;
8786 if (imm != 32) {
6ddbc6e4 8787 tmp2 = load_reg(s, rd);
d593c48e 8788 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8789 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8790 }
8791 break;
8792 case 7:
8793 goto illegal_op;
8794 default: /* Saturate. */
9ee6e8bb
PB
8795 if (shift) {
8796 if (op & 1)
6ddbc6e4 8797 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8798 else
6ddbc6e4 8799 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8800 }
6ddbc6e4 8801 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8802 if (op & 4) {
8803 /* Unsigned. */
9ee6e8bb 8804 if ((op & 1) && shift == 0)
9ef39277 8805 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8806 else
9ef39277 8807 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8808 } else {
9ee6e8bb 8809 /* Signed. */
9ee6e8bb 8810 if ((op & 1) && shift == 0)
9ef39277 8811 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8812 else
9ef39277 8813 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8814 }
b75263d6 8815 tcg_temp_free_i32(tmp2);
9ee6e8bb 8816 break;
2c0262af 8817 }
6ddbc6e4 8818 store_reg(s, rd, tmp);
9ee6e8bb
PB
8819 } else {
8820 imm = ((insn & 0x04000000) >> 15)
8821 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8822 if (insn & (1 << 22)) {
8823 /* 16-bit immediate. */
8824 imm |= (insn >> 4) & 0xf000;
8825 if (insn & (1 << 23)) {
8826 /* movt */
5e3f878a 8827 tmp = load_reg(s, rd);
86831435 8828 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8829 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8830 } else {
9ee6e8bb 8831 /* movw */
7d1b0095 8832 tmp = tcg_temp_new_i32();
5e3f878a 8833 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8834 }
8835 } else {
9ee6e8bb
PB
8836 /* Add/sub 12-bit immediate. */
8837 if (rn == 15) {
b0109805 8838 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8839 if (insn & (1 << 23))
b0109805 8840 offset -= imm;
9ee6e8bb 8841 else
b0109805 8842 offset += imm;
7d1b0095 8843 tmp = tcg_temp_new_i32();
5e3f878a 8844 tcg_gen_movi_i32(tmp, offset);
2c0262af 8845 } else {
5e3f878a 8846 tmp = load_reg(s, rn);
9ee6e8bb 8847 if (insn & (1 << 23))
5e3f878a 8848 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8849 else
5e3f878a 8850 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8851 }
9ee6e8bb 8852 }
5e3f878a 8853 store_reg(s, rd, tmp);
191abaa2 8854 }
9ee6e8bb
PB
8855 } else {
8856 int shifter_out = 0;
8857 /* modified 12-bit immediate. */
8858 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8859 imm = (insn & 0xff);
8860 switch (shift) {
8861 case 0: /* XY */
8862 /* Nothing to do. */
8863 break;
8864 case 1: /* 00XY00XY */
8865 imm |= imm << 16;
8866 break;
8867 case 2: /* XY00XY00 */
8868 imm |= imm << 16;
8869 imm <<= 8;
8870 break;
8871 case 3: /* XYXYXYXY */
8872 imm |= imm << 16;
8873 imm |= imm << 8;
8874 break;
8875 default: /* Rotated constant. */
8876 shift = (shift << 1) | (imm >> 7);
8877 imm |= 0x80;
8878 imm = imm << (32 - shift);
8879 shifter_out = 1;
8880 break;
b5ff1b31 8881 }
7d1b0095 8882 tmp2 = tcg_temp_new_i32();
3174f8e9 8883 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8884 rn = (insn >> 16) & 0xf;
3174f8e9 8885 if (rn == 15) {
7d1b0095 8886 tmp = tcg_temp_new_i32();
3174f8e9
FN
8887 tcg_gen_movi_i32(tmp, 0);
8888 } else {
8889 tmp = load_reg(s, rn);
8890 }
9ee6e8bb
PB
8891 op = (insn >> 21) & 0xf;
8892 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8893 shifter_out, tmp, tmp2))
9ee6e8bb 8894 goto illegal_op;
7d1b0095 8895 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8896 rd = (insn >> 8) & 0xf;
8897 if (rd != 15) {
3174f8e9
FN
8898 store_reg(s, rd, tmp);
8899 } else {
7d1b0095 8900 tcg_temp_free_i32(tmp);
2c0262af 8901 }
2c0262af 8902 }
9ee6e8bb
PB
8903 }
8904 break;
8905 case 12: /* Load/store single data item. */
8906 {
8907 int postinc = 0;
8908 int writeback = 0;
b0109805 8909 int user;
9ee6e8bb
PB
8910 if ((insn & 0x01100000) == 0x01000000) {
8911 if (disas_neon_ls_insn(env, s, insn))
c1713132 8912 goto illegal_op;
9ee6e8bb
PB
8913 break;
8914 }
a2fdc890
PM
8915 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8916 if (rs == 15) {
8917 if (!(insn & (1 << 20))) {
8918 goto illegal_op;
8919 }
8920 if (op != 2) {
8921 /* Byte or halfword load space with dest == r15 : memory hints.
8922 * Catch them early so we don't emit pointless addressing code.
8923 * This space is a mix of:
8924 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8925 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8926 * cores)
8927 * unallocated hints, which must be treated as NOPs
8928 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8929 * which is easiest for the decoding logic
8930 * Some space which must UNDEF
8931 */
8932 int op1 = (insn >> 23) & 3;
8933 int op2 = (insn >> 6) & 0x3f;
8934 if (op & 2) {
8935 goto illegal_op;
8936 }
8937 if (rn == 15) {
02afbf64
PM
8938 /* UNPREDICTABLE, unallocated hint or
8939 * PLD/PLDW/PLI (literal)
8940 */
a2fdc890
PM
8941 return 0;
8942 }
8943 if (op1 & 1) {
02afbf64 8944 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8945 }
8946 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8947 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8948 }
8949 /* UNDEF space, or an UNPREDICTABLE */
8950 return 1;
8951 }
8952 }
b0109805 8953 user = IS_USER(s);
9ee6e8bb 8954 if (rn == 15) {
7d1b0095 8955 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8956 /* PC relative. */
8957 /* s->pc has already been incremented by 4. */
8958 imm = s->pc & 0xfffffffc;
8959 if (insn & (1 << 23))
8960 imm += insn & 0xfff;
8961 else
8962 imm -= insn & 0xfff;
b0109805 8963 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8964 } else {
b0109805 8965 addr = load_reg(s, rn);
9ee6e8bb
PB
8966 if (insn & (1 << 23)) {
8967 /* Positive offset. */
8968 imm = insn & 0xfff;
b0109805 8969 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8970 } else {
9ee6e8bb 8971 imm = insn & 0xff;
2a0308c5
PM
8972 switch ((insn >> 8) & 0xf) {
8973 case 0x0: /* Shifted Register. */
9ee6e8bb 8974 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8975 if (shift > 3) {
8976 tcg_temp_free_i32(addr);
18c9b560 8977 goto illegal_op;
2a0308c5 8978 }
b26eefb6 8979 tmp = load_reg(s, rm);
9ee6e8bb 8980 if (shift)
b26eefb6 8981 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 8982 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8983 tcg_temp_free_i32(tmp);
9ee6e8bb 8984 break;
2a0308c5 8985 case 0xc: /* Negative offset. */
b0109805 8986 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 8987 break;
2a0308c5 8988 case 0xe: /* User privilege. */
b0109805
PB
8989 tcg_gen_addi_i32(addr, addr, imm);
8990 user = 1;
9ee6e8bb 8991 break;
2a0308c5 8992 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
8993 imm = -imm;
8994 /* Fall through. */
2a0308c5 8995 case 0xb: /* Post-increment. */
9ee6e8bb
PB
8996 postinc = 1;
8997 writeback = 1;
8998 break;
2a0308c5 8999 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9000 imm = -imm;
9001 /* Fall through. */
2a0308c5 9002 case 0xf: /* Pre-increment. */
b0109805 9003 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9004 writeback = 1;
9005 break;
9006 default:
2a0308c5 9007 tcg_temp_free_i32(addr);
b7bcbe95 9008 goto illegal_op;
9ee6e8bb
PB
9009 }
9010 }
9011 }
9ee6e8bb
PB
9012 if (insn & (1 << 20)) {
9013 /* Load. */
a2fdc890
PM
9014 switch (op) {
9015 case 0: tmp = gen_ld8u(addr, user); break;
9016 case 4: tmp = gen_ld8s(addr, user); break;
9017 case 1: tmp = gen_ld16u(addr, user); break;
9018 case 5: tmp = gen_ld16s(addr, user); break;
9019 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
9020 default:
9021 tcg_temp_free_i32(addr);
9022 goto illegal_op;
a2fdc890
PM
9023 }
9024 if (rs == 15) {
9025 gen_bx(s, tmp);
9ee6e8bb 9026 } else {
a2fdc890 9027 store_reg(s, rs, tmp);
9ee6e8bb
PB
9028 }
9029 } else {
9030 /* Store. */
b0109805 9031 tmp = load_reg(s, rs);
9ee6e8bb 9032 switch (op) {
b0109805
PB
9033 case 0: gen_st8(tmp, addr, user); break;
9034 case 1: gen_st16(tmp, addr, user); break;
9035 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
9036 default:
9037 tcg_temp_free_i32(addr);
9038 goto illegal_op;
b7bcbe95 9039 }
2c0262af 9040 }
9ee6e8bb 9041 if (postinc)
b0109805
PB
9042 tcg_gen_addi_i32(addr, addr, imm);
9043 if (writeback) {
9044 store_reg(s, rn, addr);
9045 } else {
7d1b0095 9046 tcg_temp_free_i32(addr);
b0109805 9047 }
9ee6e8bb
PB
9048 }
9049 break;
9050 default:
9051 goto illegal_op;
2c0262af 9052 }
9ee6e8bb
PB
9053 return 0;
9054illegal_op:
9055 return 1;
2c0262af
FB
9056}
9057
0ecb72a5 9058static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9059{
9060 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9061 int32_t offset;
9062 int i;
b26eefb6 9063 TCGv tmp;
d9ba4830 9064 TCGv tmp2;
b0109805 9065 TCGv addr;
99c475ab 9066
9ee6e8bb
PB
9067 if (s->condexec_mask) {
9068 cond = s->condexec_cond;
bedd2912
JB
9069 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9070 s->condlabel = gen_new_label();
9071 gen_test_cc(cond ^ 1, s->condlabel);
9072 s->condjmp = 1;
9073 }
9ee6e8bb
PB
9074 }
9075
d31dd73e 9076 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9077 s->pc += 2;
b5ff1b31 9078
99c475ab
FB
9079 switch (insn >> 12) {
9080 case 0: case 1:
396e467c 9081
99c475ab
FB
9082 rd = insn & 7;
9083 op = (insn >> 11) & 3;
9084 if (op == 3) {
9085 /* add/subtract */
9086 rn = (insn >> 3) & 7;
396e467c 9087 tmp = load_reg(s, rn);
99c475ab
FB
9088 if (insn & (1 << 10)) {
9089 /* immediate */
7d1b0095 9090 tmp2 = tcg_temp_new_i32();
396e467c 9091 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9092 } else {
9093 /* reg */
9094 rm = (insn >> 6) & 7;
396e467c 9095 tmp2 = load_reg(s, rm);
99c475ab 9096 }
9ee6e8bb
PB
9097 if (insn & (1 << 9)) {
9098 if (s->condexec_mask)
396e467c 9099 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9100 else
72485ec4 9101 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9102 } else {
9103 if (s->condexec_mask)
396e467c 9104 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9105 else
72485ec4 9106 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9107 }
7d1b0095 9108 tcg_temp_free_i32(tmp2);
396e467c 9109 store_reg(s, rd, tmp);
99c475ab
FB
9110 } else {
9111 /* shift immediate */
9112 rm = (insn >> 3) & 7;
9113 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9114 tmp = load_reg(s, rm);
9115 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9116 if (!s->condexec_mask)
9117 gen_logic_CC(tmp);
9118 store_reg(s, rd, tmp);
99c475ab
FB
9119 }
9120 break;
9121 case 2: case 3:
9122 /* arithmetic large immediate */
9123 op = (insn >> 11) & 3;
9124 rd = (insn >> 8) & 0x7;
396e467c 9125 if (op == 0) { /* mov */
7d1b0095 9126 tmp = tcg_temp_new_i32();
396e467c 9127 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9128 if (!s->condexec_mask)
396e467c
FN
9129 gen_logic_CC(tmp);
9130 store_reg(s, rd, tmp);
9131 } else {
9132 tmp = load_reg(s, rd);
7d1b0095 9133 tmp2 = tcg_temp_new_i32();
396e467c
FN
9134 tcg_gen_movi_i32(tmp2, insn & 0xff);
9135 switch (op) {
9136 case 1: /* cmp */
72485ec4 9137 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9138 tcg_temp_free_i32(tmp);
9139 tcg_temp_free_i32(tmp2);
396e467c
FN
9140 break;
9141 case 2: /* add */
9142 if (s->condexec_mask)
9143 tcg_gen_add_i32(tmp, tmp, tmp2);
9144 else
72485ec4 9145 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9146 tcg_temp_free_i32(tmp2);
396e467c
FN
9147 store_reg(s, rd, tmp);
9148 break;
9149 case 3: /* sub */
9150 if (s->condexec_mask)
9151 tcg_gen_sub_i32(tmp, tmp, tmp2);
9152 else
72485ec4 9153 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9154 tcg_temp_free_i32(tmp2);
396e467c
FN
9155 store_reg(s, rd, tmp);
9156 break;
9157 }
99c475ab 9158 }
99c475ab
FB
9159 break;
9160 case 4:
9161 if (insn & (1 << 11)) {
9162 rd = (insn >> 8) & 7;
5899f386
FB
9163 /* load pc-relative. Bit 1 of PC is ignored. */
9164 val = s->pc + 2 + ((insn & 0xff) * 4);
9165 val &= ~(uint32_t)2;
7d1b0095 9166 addr = tcg_temp_new_i32();
b0109805
PB
9167 tcg_gen_movi_i32(addr, val);
9168 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9169 tcg_temp_free_i32(addr);
b0109805 9170 store_reg(s, rd, tmp);
99c475ab
FB
9171 break;
9172 }
9173 if (insn & (1 << 10)) {
9174 /* data processing extended or blx */
9175 rd = (insn & 7) | ((insn >> 4) & 8);
9176 rm = (insn >> 3) & 0xf;
9177 op = (insn >> 8) & 3;
9178 switch (op) {
9179 case 0: /* add */
396e467c
FN
9180 tmp = load_reg(s, rd);
9181 tmp2 = load_reg(s, rm);
9182 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9183 tcg_temp_free_i32(tmp2);
396e467c 9184 store_reg(s, rd, tmp);
99c475ab
FB
9185 break;
9186 case 1: /* cmp */
396e467c
FN
9187 tmp = load_reg(s, rd);
9188 tmp2 = load_reg(s, rm);
72485ec4 9189 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9190 tcg_temp_free_i32(tmp2);
9191 tcg_temp_free_i32(tmp);
99c475ab
FB
9192 break;
9193 case 2: /* mov/cpy */
396e467c
FN
9194 tmp = load_reg(s, rm);
9195 store_reg(s, rd, tmp);
99c475ab
FB
9196 break;
9197 case 3:/* branch [and link] exchange thumb register */
b0109805 9198 tmp = load_reg(s, rm);
99c475ab 9199 if (insn & (1 << 7)) {
be5e7a76 9200 ARCH(5);
99c475ab 9201 val = (uint32_t)s->pc | 1;
7d1b0095 9202 tmp2 = tcg_temp_new_i32();
b0109805
PB
9203 tcg_gen_movi_i32(tmp2, val);
9204 store_reg(s, 14, tmp2);
99c475ab 9205 }
be5e7a76 9206 /* already thumb, no need to check */
d9ba4830 9207 gen_bx(s, tmp);
99c475ab
FB
9208 break;
9209 }
9210 break;
9211 }
9212
9213 /* data processing register */
9214 rd = insn & 7;
9215 rm = (insn >> 3) & 7;
9216 op = (insn >> 6) & 0xf;
9217 if (op == 2 || op == 3 || op == 4 || op == 7) {
9218 /* the shift/rotate ops want the operands backwards */
9219 val = rm;
9220 rm = rd;
9221 rd = val;
9222 val = 1;
9223 } else {
9224 val = 0;
9225 }
9226
396e467c 9227 if (op == 9) { /* neg */
7d1b0095 9228 tmp = tcg_temp_new_i32();
396e467c
FN
9229 tcg_gen_movi_i32(tmp, 0);
9230 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9231 tmp = load_reg(s, rd);
9232 } else {
9233 TCGV_UNUSED(tmp);
9234 }
99c475ab 9235
396e467c 9236 tmp2 = load_reg(s, rm);
5899f386 9237 switch (op) {
99c475ab 9238 case 0x0: /* and */
396e467c 9239 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9240 if (!s->condexec_mask)
396e467c 9241 gen_logic_CC(tmp);
99c475ab
FB
9242 break;
9243 case 0x1: /* eor */
396e467c 9244 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9245 if (!s->condexec_mask)
396e467c 9246 gen_logic_CC(tmp);
99c475ab
FB
9247 break;
9248 case 0x2: /* lsl */
9ee6e8bb 9249 if (s->condexec_mask) {
365af80e 9250 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9251 } else {
9ef39277 9252 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9253 gen_logic_CC(tmp2);
9ee6e8bb 9254 }
99c475ab
FB
9255 break;
9256 case 0x3: /* lsr */
9ee6e8bb 9257 if (s->condexec_mask) {
365af80e 9258 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9259 } else {
9ef39277 9260 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9261 gen_logic_CC(tmp2);
9ee6e8bb 9262 }
99c475ab
FB
9263 break;
9264 case 0x4: /* asr */
9ee6e8bb 9265 if (s->condexec_mask) {
365af80e 9266 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9267 } else {
9ef39277 9268 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9269 gen_logic_CC(tmp2);
9ee6e8bb 9270 }
99c475ab
FB
9271 break;
9272 case 0x5: /* adc */
49b4c31e 9273 if (s->condexec_mask) {
396e467c 9274 gen_adc(tmp, tmp2);
49b4c31e
RH
9275 } else {
9276 gen_adc_CC(tmp, tmp, tmp2);
9277 }
99c475ab
FB
9278 break;
9279 case 0x6: /* sbc */
2de68a49 9280 if (s->condexec_mask) {
396e467c 9281 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9282 } else {
9283 gen_sbc_CC(tmp, tmp, tmp2);
9284 }
99c475ab
FB
9285 break;
9286 case 0x7: /* ror */
9ee6e8bb 9287 if (s->condexec_mask) {
f669df27
AJ
9288 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9289 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9290 } else {
9ef39277 9291 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9292 gen_logic_CC(tmp2);
9ee6e8bb 9293 }
99c475ab
FB
9294 break;
9295 case 0x8: /* tst */
396e467c
FN
9296 tcg_gen_and_i32(tmp, tmp, tmp2);
9297 gen_logic_CC(tmp);
99c475ab 9298 rd = 16;
5899f386 9299 break;
99c475ab 9300 case 0x9: /* neg */
9ee6e8bb 9301 if (s->condexec_mask)
396e467c 9302 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9303 else
72485ec4 9304 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9305 break;
9306 case 0xa: /* cmp */
72485ec4 9307 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9308 rd = 16;
9309 break;
9310 case 0xb: /* cmn */
72485ec4 9311 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9312 rd = 16;
9313 break;
9314 case 0xc: /* orr */
396e467c 9315 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9316 if (!s->condexec_mask)
396e467c 9317 gen_logic_CC(tmp);
99c475ab
FB
9318 break;
9319 case 0xd: /* mul */
7b2919a0 9320 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9321 if (!s->condexec_mask)
396e467c 9322 gen_logic_CC(tmp);
99c475ab
FB
9323 break;
9324 case 0xe: /* bic */
f669df27 9325 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9326 if (!s->condexec_mask)
396e467c 9327 gen_logic_CC(tmp);
99c475ab
FB
9328 break;
9329 case 0xf: /* mvn */
396e467c 9330 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9331 if (!s->condexec_mask)
396e467c 9332 gen_logic_CC(tmp2);
99c475ab 9333 val = 1;
5899f386 9334 rm = rd;
99c475ab
FB
9335 break;
9336 }
9337 if (rd != 16) {
396e467c
FN
9338 if (val) {
9339 store_reg(s, rm, tmp2);
9340 if (op != 0xf)
7d1b0095 9341 tcg_temp_free_i32(tmp);
396e467c
FN
9342 } else {
9343 store_reg(s, rd, tmp);
7d1b0095 9344 tcg_temp_free_i32(tmp2);
396e467c
FN
9345 }
9346 } else {
7d1b0095
PM
9347 tcg_temp_free_i32(tmp);
9348 tcg_temp_free_i32(tmp2);
99c475ab
FB
9349 }
9350 break;
9351
9352 case 5:
9353 /* load/store register offset. */
9354 rd = insn & 7;
9355 rn = (insn >> 3) & 7;
9356 rm = (insn >> 6) & 7;
9357 op = (insn >> 9) & 7;
b0109805 9358 addr = load_reg(s, rn);
b26eefb6 9359 tmp = load_reg(s, rm);
b0109805 9360 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9361 tcg_temp_free_i32(tmp);
99c475ab
FB
9362
9363 if (op < 3) /* store */
b0109805 9364 tmp = load_reg(s, rd);
99c475ab
FB
9365
9366 switch (op) {
9367 case 0: /* str */
b0109805 9368 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9369 break;
9370 case 1: /* strh */
b0109805 9371 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9372 break;
9373 case 2: /* strb */
b0109805 9374 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9375 break;
9376 case 3: /* ldrsb */
b0109805 9377 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9378 break;
9379 case 4: /* ldr */
b0109805 9380 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9381 break;
9382 case 5: /* ldrh */
b0109805 9383 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9384 break;
9385 case 6: /* ldrb */
b0109805 9386 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9387 break;
9388 case 7: /* ldrsh */
b0109805 9389 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9390 break;
9391 }
9392 if (op >= 3) /* load */
b0109805 9393 store_reg(s, rd, tmp);
7d1b0095 9394 tcg_temp_free_i32(addr);
99c475ab
FB
9395 break;
9396
9397 case 6:
9398 /* load/store word immediate offset */
9399 rd = insn & 7;
9400 rn = (insn >> 3) & 7;
b0109805 9401 addr = load_reg(s, rn);
99c475ab 9402 val = (insn >> 4) & 0x7c;
b0109805 9403 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9404
9405 if (insn & (1 << 11)) {
9406 /* load */
b0109805
PB
9407 tmp = gen_ld32(addr, IS_USER(s));
9408 store_reg(s, rd, tmp);
99c475ab
FB
9409 } else {
9410 /* store */
b0109805
PB
9411 tmp = load_reg(s, rd);
9412 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9413 }
7d1b0095 9414 tcg_temp_free_i32(addr);
99c475ab
FB
9415 break;
9416
9417 case 7:
9418 /* load/store byte immediate offset */
9419 rd = insn & 7;
9420 rn = (insn >> 3) & 7;
b0109805 9421 addr = load_reg(s, rn);
99c475ab 9422 val = (insn >> 6) & 0x1f;
b0109805 9423 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9424
9425 if (insn & (1 << 11)) {
9426 /* load */
b0109805
PB
9427 tmp = gen_ld8u(addr, IS_USER(s));
9428 store_reg(s, rd, tmp);
99c475ab
FB
9429 } else {
9430 /* store */
b0109805
PB
9431 tmp = load_reg(s, rd);
9432 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9433 }
7d1b0095 9434 tcg_temp_free_i32(addr);
99c475ab
FB
9435 break;
9436
9437 case 8:
9438 /* load/store halfword immediate offset */
9439 rd = insn & 7;
9440 rn = (insn >> 3) & 7;
b0109805 9441 addr = load_reg(s, rn);
99c475ab 9442 val = (insn >> 5) & 0x3e;
b0109805 9443 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9444
9445 if (insn & (1 << 11)) {
9446 /* load */
b0109805
PB
9447 tmp = gen_ld16u(addr, IS_USER(s));
9448 store_reg(s, rd, tmp);
99c475ab
FB
9449 } else {
9450 /* store */
b0109805
PB
9451 tmp = load_reg(s, rd);
9452 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9453 }
7d1b0095 9454 tcg_temp_free_i32(addr);
99c475ab
FB
9455 break;
9456
9457 case 9:
9458 /* load/store from stack */
9459 rd = (insn >> 8) & 7;
b0109805 9460 addr = load_reg(s, 13);
99c475ab 9461 val = (insn & 0xff) * 4;
b0109805 9462 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9463
9464 if (insn & (1 << 11)) {
9465 /* load */
b0109805
PB
9466 tmp = gen_ld32(addr, IS_USER(s));
9467 store_reg(s, rd, tmp);
99c475ab
FB
9468 } else {
9469 /* store */
b0109805
PB
9470 tmp = load_reg(s, rd);
9471 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9472 }
7d1b0095 9473 tcg_temp_free_i32(addr);
99c475ab
FB
9474 break;
9475
9476 case 10:
9477 /* add to high reg */
9478 rd = (insn >> 8) & 7;
5899f386
FB
9479 if (insn & (1 << 11)) {
9480 /* SP */
5e3f878a 9481 tmp = load_reg(s, 13);
5899f386
FB
9482 } else {
9483 /* PC. bit 1 is ignored. */
7d1b0095 9484 tmp = tcg_temp_new_i32();
5e3f878a 9485 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9486 }
99c475ab 9487 val = (insn & 0xff) * 4;
5e3f878a
PB
9488 tcg_gen_addi_i32(tmp, tmp, val);
9489 store_reg(s, rd, tmp);
99c475ab
FB
9490 break;
9491
9492 case 11:
9493 /* misc */
9494 op = (insn >> 8) & 0xf;
9495 switch (op) {
9496 case 0:
9497 /* adjust stack pointer */
b26eefb6 9498 tmp = load_reg(s, 13);
99c475ab
FB
9499 val = (insn & 0x7f) * 4;
9500 if (insn & (1 << 7))
6a0d8a1d 9501 val = -(int32_t)val;
b26eefb6
PB
9502 tcg_gen_addi_i32(tmp, tmp, val);
9503 store_reg(s, 13, tmp);
99c475ab
FB
9504 break;
9505
9ee6e8bb
PB
9506 case 2: /* sign/zero extend. */
9507 ARCH(6);
9508 rd = insn & 7;
9509 rm = (insn >> 3) & 7;
b0109805 9510 tmp = load_reg(s, rm);
9ee6e8bb 9511 switch ((insn >> 6) & 3) {
b0109805
PB
9512 case 0: gen_sxth(tmp); break;
9513 case 1: gen_sxtb(tmp); break;
9514 case 2: gen_uxth(tmp); break;
9515 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9516 }
b0109805 9517 store_reg(s, rd, tmp);
9ee6e8bb 9518 break;
99c475ab
FB
9519 case 4: case 5: case 0xc: case 0xd:
9520 /* push/pop */
b0109805 9521 addr = load_reg(s, 13);
5899f386
FB
9522 if (insn & (1 << 8))
9523 offset = 4;
99c475ab 9524 else
5899f386
FB
9525 offset = 0;
9526 for (i = 0; i < 8; i++) {
9527 if (insn & (1 << i))
9528 offset += 4;
9529 }
9530 if ((insn & (1 << 11)) == 0) {
b0109805 9531 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9532 }
99c475ab
FB
9533 for (i = 0; i < 8; i++) {
9534 if (insn & (1 << i)) {
9535 if (insn & (1 << 11)) {
9536 /* pop */
b0109805
PB
9537 tmp = gen_ld32(addr, IS_USER(s));
9538 store_reg(s, i, tmp);
99c475ab
FB
9539 } else {
9540 /* push */
b0109805
PB
9541 tmp = load_reg(s, i);
9542 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9543 }
5899f386 9544 /* advance to the next address. */
b0109805 9545 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9546 }
9547 }
a50f5b91 9548 TCGV_UNUSED(tmp);
99c475ab
FB
9549 if (insn & (1 << 8)) {
9550 if (insn & (1 << 11)) {
9551 /* pop pc */
b0109805 9552 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9553 /* don't set the pc until the rest of the instruction
9554 has completed */
9555 } else {
9556 /* push lr */
b0109805
PB
9557 tmp = load_reg(s, 14);
9558 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9559 }
b0109805 9560 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9561 }
5899f386 9562 if ((insn & (1 << 11)) == 0) {
b0109805 9563 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9564 }
99c475ab 9565 /* write back the new stack pointer */
b0109805 9566 store_reg(s, 13, addr);
99c475ab 9567 /* set the new PC value */
be5e7a76
DES
9568 if ((insn & 0x0900) == 0x0900) {
9569 store_reg_from_load(env, s, 15, tmp);
9570 }
99c475ab
FB
9571 break;
9572
9ee6e8bb
PB
9573 case 1: case 3: case 9: case 11: /* czb */
9574 rm = insn & 7;
d9ba4830 9575 tmp = load_reg(s, rm);
9ee6e8bb
PB
9576 s->condlabel = gen_new_label();
9577 s->condjmp = 1;
9578 if (insn & (1 << 11))
cb63669a 9579 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9580 else
cb63669a 9581 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9582 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9583 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9584 val = (uint32_t)s->pc + 2;
9585 val += offset;
9586 gen_jmp(s, val);
9587 break;
9588
9589 case 15: /* IT, nop-hint. */
9590 if ((insn & 0xf) == 0) {
9591 gen_nop_hint(s, (insn >> 4) & 0xf);
9592 break;
9593 }
9594 /* If Then. */
9595 s->condexec_cond = (insn >> 4) & 0xe;
9596 s->condexec_mask = insn & 0x1f;
9597 /* No actual code generated for this insn, just setup state. */
9598 break;
9599
06c949e6 9600 case 0xe: /* bkpt */
be5e7a76 9601 ARCH(5);
bc4a0de0 9602 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9603 break;
9604
9ee6e8bb
PB
9605 case 0xa: /* rev */
9606 ARCH(6);
9607 rn = (insn >> 3) & 0x7;
9608 rd = insn & 0x7;
b0109805 9609 tmp = load_reg(s, rn);
9ee6e8bb 9610 switch ((insn >> 6) & 3) {
66896cb8 9611 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9612 case 1: gen_rev16(tmp); break;
9613 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9614 default: goto illegal_op;
9615 }
b0109805 9616 store_reg(s, rd, tmp);
9ee6e8bb
PB
9617 break;
9618
d9e028c1
PM
9619 case 6:
9620 switch ((insn >> 5) & 7) {
9621 case 2:
9622 /* setend */
9623 ARCH(6);
10962fd5
PM
9624 if (((insn >> 3) & 1) != s->bswap_code) {
9625 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9626 goto illegal_op;
9627 }
9ee6e8bb 9628 break;
d9e028c1
PM
9629 case 3:
9630 /* cps */
9631 ARCH(6);
9632 if (IS_USER(s)) {
9633 break;
8984bd2e 9634 }
d9e028c1
PM
9635 if (IS_M(env)) {
9636 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9637 /* FAULTMASK */
9638 if (insn & 1) {
9639 addr = tcg_const_i32(19);
9640 gen_helper_v7m_msr(cpu_env, addr, tmp);
9641 tcg_temp_free_i32(addr);
9642 }
9643 /* PRIMASK */
9644 if (insn & 2) {
9645 addr = tcg_const_i32(16);
9646 gen_helper_v7m_msr(cpu_env, addr, tmp);
9647 tcg_temp_free_i32(addr);
9648 }
9649 tcg_temp_free_i32(tmp);
9650 gen_lookup_tb(s);
9651 } else {
9652 if (insn & (1 << 4)) {
9653 shift = CPSR_A | CPSR_I | CPSR_F;
9654 } else {
9655 shift = 0;
9656 }
9657 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9658 }
d9e028c1
PM
9659 break;
9660 default:
9661 goto undef;
9ee6e8bb
PB
9662 }
9663 break;
9664
99c475ab
FB
9665 default:
9666 goto undef;
9667 }
9668 break;
9669
9670 case 12:
a7d3970d 9671 {
99c475ab 9672 /* load/store multiple */
a7d3970d
PM
9673 TCGv loaded_var;
9674 TCGV_UNUSED(loaded_var);
99c475ab 9675 rn = (insn >> 8) & 0x7;
b0109805 9676 addr = load_reg(s, rn);
99c475ab
FB
9677 for (i = 0; i < 8; i++) {
9678 if (insn & (1 << i)) {
99c475ab
FB
9679 if (insn & (1 << 11)) {
9680 /* load */
b0109805 9681 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9682 if (i == rn) {
9683 loaded_var = tmp;
9684 } else {
9685 store_reg(s, i, tmp);
9686 }
99c475ab
FB
9687 } else {
9688 /* store */
b0109805
PB
9689 tmp = load_reg(s, i);
9690 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9691 }
5899f386 9692 /* advance to the next address */
b0109805 9693 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9694 }
9695 }
b0109805 9696 if ((insn & (1 << rn)) == 0) {
a7d3970d 9697 /* base reg not in list: base register writeback */
b0109805
PB
9698 store_reg(s, rn, addr);
9699 } else {
a7d3970d
PM
9700 /* base reg in list: if load, complete it now */
9701 if (insn & (1 << 11)) {
9702 store_reg(s, rn, loaded_var);
9703 }
7d1b0095 9704 tcg_temp_free_i32(addr);
b0109805 9705 }
99c475ab 9706 break;
a7d3970d 9707 }
99c475ab
FB
9708 case 13:
9709 /* conditional branch or swi */
9710 cond = (insn >> 8) & 0xf;
9711 if (cond == 0xe)
9712 goto undef;
9713
9714 if (cond == 0xf) {
9715 /* swi */
422ebf69 9716 gen_set_pc_im(s->pc);
9ee6e8bb 9717 s->is_jmp = DISAS_SWI;
99c475ab
FB
9718 break;
9719 }
9720 /* generate a conditional jump to next instruction */
e50e6a20 9721 s->condlabel = gen_new_label();
d9ba4830 9722 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9723 s->condjmp = 1;
99c475ab
FB
9724
9725 /* jump to the offset */
5899f386 9726 val = (uint32_t)s->pc + 2;
99c475ab 9727 offset = ((int32_t)insn << 24) >> 24;
5899f386 9728 val += offset << 1;
8aaca4c0 9729 gen_jmp(s, val);
99c475ab
FB
9730 break;
9731
9732 case 14:
358bf29e 9733 if (insn & (1 << 11)) {
9ee6e8bb
PB
9734 if (disas_thumb2_insn(env, s, insn))
9735 goto undef32;
358bf29e
PB
9736 break;
9737 }
9ee6e8bb 9738 /* unconditional branch */
99c475ab
FB
9739 val = (uint32_t)s->pc;
9740 offset = ((int32_t)insn << 21) >> 21;
9741 val += (offset << 1) + 2;
8aaca4c0 9742 gen_jmp(s, val);
99c475ab
FB
9743 break;
9744
9745 case 15:
9ee6e8bb 9746 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9747 goto undef32;
9ee6e8bb 9748 break;
99c475ab
FB
9749 }
9750 return;
9ee6e8bb 9751undef32:
bc4a0de0 9752 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9753 return;
9754illegal_op:
99c475ab 9755undef:
bc4a0de0 9756 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9757}
9758
2c0262af
FB
9759/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9760 basic block 'tb'. If search_pc is TRUE, also generate PC
9761 information for each intermediate instruction. */
0ecb72a5 9762static inline void gen_intermediate_code_internal(CPUARMState *env,
2cfc5f17
TS
9763 TranslationBlock *tb,
9764 int search_pc)
2c0262af
FB
9765{
9766 DisasContext dc1, *dc = &dc1;
a1d1bb31 9767 CPUBreakpoint *bp;
2c0262af
FB
9768 uint16_t *gen_opc_end;
9769 int j, lj;
0fa85d43 9770 target_ulong pc_start;
b5ff1b31 9771 uint32_t next_page_start;
2e70f6ef
PB
9772 int num_insns;
9773 int max_insns;
3b46e624 9774
2c0262af 9775 /* generate intermediate code */
0fa85d43 9776 pc_start = tb->pc;
3b46e624 9777
2c0262af
FB
9778 dc->tb = tb;
9779
92414b31 9780 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9781
9782 dc->is_jmp = DISAS_NEXT;
9783 dc->pc = pc_start;
8aaca4c0 9784 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9785 dc->condjmp = 0;
7204ab88 9786 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 9787 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
9788 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9789 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9790#if !defined(CONFIG_USER_ONLY)
61f74d6a 9791 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9792#endif
5df8bac1 9793 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9794 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9795 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9796 cpu_F0s = tcg_temp_new_i32();
9797 cpu_F1s = tcg_temp_new_i32();
9798 cpu_F0d = tcg_temp_new_i64();
9799 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9800 cpu_V0 = cpu_F0d;
9801 cpu_V1 = cpu_F1d;
e677137d 9802 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9803 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9804 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9805 lj = -1;
2e70f6ef
PB
9806 num_insns = 0;
9807 max_insns = tb->cflags & CF_COUNT_MASK;
9808 if (max_insns == 0)
9809 max_insns = CF_COUNT_MASK;
9810
806f352d 9811 gen_tb_start();
e12ce78d 9812
3849902c
PM
9813 tcg_clear_temp_count();
9814
e12ce78d
PM
9815 /* A note on handling of the condexec (IT) bits:
9816 *
9817 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9818 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9819 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9820 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9821 * to do it at the end of the block. (For example if we don't do this
9822 * it's hard to identify whether we can safely skip writing condexec
9823 * at the end of the TB, which we definitely want to do for the case
9824 * where a TB doesn't do anything with the IT state at all.)
9825 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9826 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9827 * This is done both for leaving the TB at the end, and for leaving
9828 * it because of an exception we know will happen, which is done in
9829 * gen_exception_insn(). The latter is necessary because we need to
9830 * leave the TB with the PC/IT state just prior to execution of the
9831 * instruction which caused the exception.
9832 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9833 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9834 * This is handled in the same way as restoration of the
9835 * PC in these situations: we will be called again with search_pc=1
9836 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9837 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9838 * this to restore the condexec bits.
e12ce78d
PM
9839 *
9840 * Note that there are no instructions which can read the condexec
9841 * bits, and none which can write non-static values to them, so
0ecb72a5 9842 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
9843 * middle of a TB.
9844 */
9845
9ee6e8bb
PB
9846 /* Reset the conditional execution bits immediately. This avoids
9847 complications trying to do it at the end of the block. */
98eac7ca 9848 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9849 {
7d1b0095 9850 TCGv tmp = tcg_temp_new_i32();
8f01245e 9851 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9852 store_cpu_field(tmp, condexec_bits);
8f01245e 9853 }
2c0262af 9854 do {
fbb4a2e3
PB
9855#ifdef CONFIG_USER_ONLY
9856 /* Intercept jump to the magic kernel page. */
9857 if (dc->pc >= 0xffff0000) {
9858 /* We always get here via a jump, so know we are not in a
9859 conditional execution block. */
9860 gen_exception(EXCP_KERNEL_TRAP);
9861 dc->is_jmp = DISAS_UPDATE;
9862 break;
9863 }
9864#else
9ee6e8bb
PB
9865 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9866 /* We always get here via a jump, so know we are not in a
9867 conditional execution block. */
d9ba4830 9868 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9869 dc->is_jmp = DISAS_UPDATE;
9870 break;
9ee6e8bb
PB
9871 }
9872#endif
9873
72cf2d4f
BS
9874 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9875 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9876 if (bp->pc == dc->pc) {
bc4a0de0 9877 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9878 /* Advance PC so that clearing the breakpoint will
9879 invalidate this TB. */
9880 dc->pc += 2;
9881 goto done_generating;
1fddef4b
FB
9882 break;
9883 }
9884 }
9885 }
2c0262af 9886 if (search_pc) {
92414b31 9887 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
9888 if (lj < j) {
9889 lj++;
9890 while (lj < j)
ab1103de 9891 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 9892 }
25983cad 9893 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 9894 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 9895 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 9896 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 9897 }
e50e6a20 9898
2e70f6ef
PB
9899 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9900 gen_io_start();
9901
fdefe51c 9902 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
9903 tcg_gen_debug_insn_start(dc->pc);
9904 }
9905
7204ab88 9906 if (dc->thumb) {
9ee6e8bb
PB
9907 disas_thumb_insn(env, dc);
9908 if (dc->condexec_mask) {
9909 dc->condexec_cond = (dc->condexec_cond & 0xe)
9910 | ((dc->condexec_mask >> 4) & 1);
9911 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9912 if (dc->condexec_mask == 0) {
9913 dc->condexec_cond = 0;
9914 }
9915 }
9916 } else {
9917 disas_arm_insn(env, dc);
9918 }
e50e6a20
FB
9919
9920 if (dc->condjmp && !dc->is_jmp) {
9921 gen_set_label(dc->condlabel);
9922 dc->condjmp = 0;
9923 }
3849902c
PM
9924
9925 if (tcg_check_temp_count()) {
9926 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
9927 }
9928
aaf2d97d 9929 /* Translation stops when a conditional branch is encountered.
e50e6a20 9930 * Otherwise the subsequent code could get translated several times.
b5ff1b31 9931 * Also stop translation when a page boundary is reached. This
bf20dc07 9932 * ensures prefetch aborts occur at the right place. */
2e70f6ef 9933 num_insns ++;
efd7f486 9934 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
1fddef4b 9935 !env->singlestep_enabled &&
1b530a6d 9936 !singlestep &&
2e70f6ef
PB
9937 dc->pc < next_page_start &&
9938 num_insns < max_insns);
9939
9940 if (tb->cflags & CF_LAST_IO) {
9941 if (dc->condjmp) {
9942 /* FIXME: This can theoretically happen with self-modifying
9943 code. */
9944 cpu_abort(env, "IO on conditional branch instruction");
9945 }
9946 gen_io_end();
9947 }
9ee6e8bb 9948
b5ff1b31 9949 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
9950 instruction was a conditional branch or trap, and the PC has
9951 already been written. */
551bd27f 9952 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 9953 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 9954 if (dc->condjmp) {
9ee6e8bb
PB
9955 gen_set_condexec(dc);
9956 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 9957 gen_exception(EXCP_SWI);
9ee6e8bb 9958 } else {
d9ba4830 9959 gen_exception(EXCP_DEBUG);
9ee6e8bb 9960 }
e50e6a20
FB
9961 gen_set_label(dc->condlabel);
9962 }
9963 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 9964 gen_set_pc_im(dc->pc);
e50e6a20 9965 dc->condjmp = 0;
8aaca4c0 9966 }
9ee6e8bb
PB
9967 gen_set_condexec(dc);
9968 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 9969 gen_exception(EXCP_SWI);
9ee6e8bb
PB
9970 } else {
9971 /* FIXME: Single stepping a WFI insn will not halt
9972 the CPU. */
d9ba4830 9973 gen_exception(EXCP_DEBUG);
9ee6e8bb 9974 }
8aaca4c0 9975 } else {
9ee6e8bb
PB
9976 /* While branches must always occur at the end of an IT block,
9977 there are a few other things that can cause us to terminate
65626741 9978 the TB in the middle of an IT block:
9ee6e8bb
PB
9979 - Exception generating instructions (bkpt, swi, undefined).
9980 - Page boundaries.
9981 - Hardware watchpoints.
9982 Hardware breakpoints have already been handled and skip this code.
9983 */
9984 gen_set_condexec(dc);
8aaca4c0 9985 switch(dc->is_jmp) {
8aaca4c0 9986 case DISAS_NEXT:
6e256c93 9987 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
9988 break;
9989 default:
9990 case DISAS_JUMP:
9991 case DISAS_UPDATE:
9992 /* indicate that the hash table must be used to find the next TB */
57fec1fe 9993 tcg_gen_exit_tb(0);
8aaca4c0
FB
9994 break;
9995 case DISAS_TB_JUMP:
9996 /* nothing more to generate */
9997 break;
9ee6e8bb 9998 case DISAS_WFI:
1ce94f81 9999 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10000 break;
10001 case DISAS_SWI:
d9ba4830 10002 gen_exception(EXCP_SWI);
9ee6e8bb 10003 break;
8aaca4c0 10004 }
e50e6a20
FB
10005 if (dc->condjmp) {
10006 gen_set_label(dc->condlabel);
9ee6e8bb 10007 gen_set_condexec(dc);
6e256c93 10008 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10009 dc->condjmp = 0;
10010 }
2c0262af 10011 }
2e70f6ef 10012
9ee6e8bb 10013done_generating:
806f352d 10014 gen_tb_end(tb, num_insns);
efd7f486 10015 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10016
10017#ifdef DEBUG_DISAS
8fec2b8c 10018 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10019 qemu_log("----------------\n");
10020 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10021 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10022 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10023 qemu_log("\n");
2c0262af
FB
10024 }
10025#endif
b5ff1b31 10026 if (search_pc) {
92414b31 10027 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10028 lj++;
10029 while (lj <= j)
ab1103de 10030 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10031 } else {
2c0262af 10032 tb->size = dc->pc - pc_start;
2e70f6ef 10033 tb->icount = num_insns;
b5ff1b31 10034 }
2c0262af
FB
10035}
10036
0ecb72a5 10037void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10038{
2cfc5f17 10039 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
10040}
10041
0ecb72a5 10042void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10043{
2cfc5f17 10044 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
10045}
10046
b5ff1b31
FB
10047static const char *cpu_mode_names[16] = {
10048 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10049 "???", "???", "???", "und", "???", "???", "???", "sys"
10050};
9ee6e8bb 10051
0ecb72a5 10052void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10053 int flags)
2c0262af
FB
10054{
10055 int i;
b5ff1b31 10056 uint32_t psr;
2c0262af
FB
10057
10058 for(i=0;i<16;i++) {
7fe48483 10059 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10060 if ((i % 4) == 3)
7fe48483 10061 cpu_fprintf(f, "\n");
2c0262af 10062 else
7fe48483 10063 cpu_fprintf(f, " ");
2c0262af 10064 }
b5ff1b31 10065 psr = cpsr_read(env);
687fa640
TS
10066 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10067 psr,
b5ff1b31
FB
10068 psr & (1 << 31) ? 'N' : '-',
10069 psr & (1 << 30) ? 'Z' : '-',
10070 psr & (1 << 29) ? 'C' : '-',
10071 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10072 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10073 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10074
f2617cfc
PM
10075 if (flags & CPU_DUMP_FPU) {
10076 int numvfpregs = 0;
10077 if (arm_feature(env, ARM_FEATURE_VFP)) {
10078 numvfpregs += 16;
10079 }
10080 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10081 numvfpregs += 16;
10082 }
10083 for (i = 0; i < numvfpregs; i++) {
10084 uint64_t v = float64_val(env->vfp.regs[i]);
10085 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10086 i * 2, (uint32_t)v,
10087 i * 2 + 1, (uint32_t)(v >> 32),
10088 i, v);
10089 }
10090 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10091 }
2c0262af 10092}
a6b025d3 10093
0ecb72a5 10094void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10095{
25983cad 10096 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10097 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10098}