]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
pflash_cfi02.c: fix debug macro
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
36#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
37#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
38/* currently all emulated v5 cores are also v5TE, so don't bother */
39#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
40#define ENABLE_ARCH_5J 0
41#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
42#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
43#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
44#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 45#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 46
86753403 47#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 48
2c0262af
FB
49/* internal defines */
50typedef struct DisasContext {
0fa85d43 51 target_ulong pc;
2c0262af 52 int is_jmp;
e50e6a20
FB
53 /* Nonzero if this instruction has been conditionally skipped. */
54 int condjmp;
55 /* The label that will be jumped to when the instruction is skipped. */
56 int condlabel;
b90372ad 57 /* Thumb-2 conditional execution bits. */
9ee6e8bb
PB
58 int condexec_mask;
59 int condexec_cond;
2c0262af 60 struct TranslationBlock *tb;
8aaca4c0 61 int singlestep_enabled;
5899f386 62 int thumb;
d8fd2954 63 int bswap_code;
b5ff1b31
FB
64#if !defined(CONFIG_USER_ONLY)
65 int user;
66#endif
5df8bac1 67 int vfp_enabled;
69d1fc22
PM
68 int vec_len;
69 int vec_stride;
2c0262af
FB
70} DisasContext;
71
e12ce78d
PM
72static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
73
b5ff1b31
FB
74#if defined(CONFIG_USER_ONLY)
75#define IS_USER(s) 1
76#else
77#define IS_USER(s) (s->user)
78#endif
79
9ee6e8bb 80/* These instructions trap after executing, so defer them until after the
b90372ad 81 conditional execution state has been updated. */
9ee6e8bb
PB
82#define DISAS_WFI 4
83#define DISAS_SWI 5
2c0262af 84
a7812ae4 85static TCGv_ptr cpu_env;
ad69471c 86/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 87static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 88static TCGv_i32 cpu_R[16];
66c374de 89static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
90static TCGv_i32 cpu_exclusive_addr;
91static TCGv_i32 cpu_exclusive_val;
92static TCGv_i32 cpu_exclusive_high;
93#ifdef CONFIG_USER_ONLY
94static TCGv_i32 cpu_exclusive_test;
95static TCGv_i32 cpu_exclusive_info;
96#endif
ad69471c 97
b26eefb6 98/* FIXME: These should be removed. */
39d5492a 99static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 100static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 101
022c62cb 102#include "exec/gen-icount.h"
2e70f6ef 103
155c3eac
FN
/* Names for the TCG globals backing the 16 ARM core registers; indices
 * match CPUARMState.regs[].  r15 is displayed as "pc".  Declared fully
 * const (pointer array of const strings) so the table lives in rodata. */
static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
107
b26eefb6
PB
108/* initialize TCG globals. */
109void arm_translate_init(void)
110{
155c3eac
FN
111 int i;
112
a7812ae4
PB
113 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
114
155c3eac
FN
115 for (i = 0; i < 16; i++) {
116 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 117 offsetof(CPUARMState, regs[i]),
155c3eac
FN
118 regnames[i]);
119 }
66c374de
AJ
120 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
121 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
122 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
123 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
124
426f5abc 125 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 126 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 127 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 128 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 129 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 130 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
131#ifdef CONFIG_USER_ONLY
132 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 133 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 134 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 135 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 136#endif
155c3eac 137
a7812ae4 138#define GEN_HELPER 2
7b59220e 139#include "helper.h"
b26eefb6
PB
140}
141
39d5492a 142static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 143{
39d5492a 144 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
145 tcg_gen_ld_i32(tmp, cpu_env, offset);
146 return tmp;
147}
148
0ecb72a5 149#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 150
39d5492a 151static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
152{
153 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 154 tcg_temp_free_i32(var);
d9ba4830
PB
155}
156
157#define store_cpu_field(var, name) \
0ecb72a5 158 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 159
b26eefb6 160/* Set a variable to the value of a CPU register. */
39d5492a 161static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
162{
163 if (reg == 15) {
164 uint32_t addr;
b90372ad 165 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
166 if (s->thumb)
167 addr = (long)s->pc + 2;
168 else
169 addr = (long)s->pc + 4;
170 tcg_gen_movi_i32(var, addr);
171 } else {
155c3eac 172 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
173 }
174}
175
176/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 177static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 178{
39d5492a 179 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
180 load_reg_var(s, tmp, reg);
181 return tmp;
182}
183
184/* Set a CPU register. The source must be a temporary and will be
185 marked as dead. */
39d5492a 186static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
187{
188 if (reg == 15) {
189 tcg_gen_andi_i32(var, var, ~1);
190 s->is_jmp = DISAS_JUMP;
191 }
155c3eac 192 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 193 tcg_temp_free_i32(var);
b26eefb6
PB
194}
195
/* Value extensions: narrow var to 8 or 16 bits in place, with zero
   (uxt*) or sign (sxt*) extension back to 32 bits. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual 8->16 extensions within packed halfwords (helper-based). */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 204
b26eefb6 205
39d5492a 206static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 207{
39d5492a 208 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 209 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
210 tcg_temp_free_i32(tmp_mask);
211}
d9ba4830
PB
212/* Set NZCV flags from the high 4 bits of var. */
213#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
214
215static void gen_exception(int excp)
216{
39d5492a 217 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830 218 tcg_gen_movi_i32(tmp, excp);
1ce94f81 219 gen_helper_exception(cpu_env, tmp);
7d1b0095 220 tcg_temp_free_i32(tmp);
d9ba4830
PB
221}
222
39d5492a 223static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 224{
39d5492a
PM
225 TCGv_i32 tmp1 = tcg_temp_new_i32();
226 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
227 tcg_gen_ext16s_i32(tmp1, a);
228 tcg_gen_ext16s_i32(tmp2, b);
3670669c 229 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 230 tcg_temp_free_i32(tmp2);
3670669c
PB
231 tcg_gen_sari_i32(a, a, 16);
232 tcg_gen_sari_i32(b, b, 16);
233 tcg_gen_mul_i32(b, b, a);
234 tcg_gen_mov_i32(a, tmp1);
7d1b0095 235 tcg_temp_free_i32(tmp1);
3670669c
PB
236}
237
238/* Byteswap each halfword. */
39d5492a 239static void gen_rev16(TCGv_i32 var)
3670669c 240{
39d5492a 241 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
242 tcg_gen_shri_i32(tmp, var, 8);
243 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
244 tcg_gen_shli_i32(var, var, 8);
245 tcg_gen_andi_i32(var, var, 0xff00ff00);
246 tcg_gen_or_i32(var, var, tmp);
7d1b0095 247 tcg_temp_free_i32(tmp);
3670669c
PB
248}
249
250/* Byteswap low halfword and sign extend. */
39d5492a 251static void gen_revsh(TCGv_i32 var)
3670669c 252{
1a855029
AJ
253 tcg_gen_ext16u_i32(var, var);
254 tcg_gen_bswap16_i32(var, var);
255 tcg_gen_ext16s_i32(var, var);
3670669c
PB
256}
257
258/* Unsigned bitfield extract. */
39d5492a 259static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
260{
261 if (shift)
262 tcg_gen_shri_i32(var, var, shift);
263 tcg_gen_andi_i32(var, var, mask);
264}
265
266/* Signed bitfield extract. */
39d5492a 267static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
268{
269 uint32_t signbit;
270
271 if (shift)
272 tcg_gen_sari_i32(var, var, shift);
273 if (shift + width < 32) {
274 signbit = 1u << (width - 1);
275 tcg_gen_andi_i32(var, var, (1u << width) - 1);
276 tcg_gen_xori_i32(var, var, signbit);
277 tcg_gen_subi_i32(var, var, signbit);
278 }
279}
280
838fa72d 281/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 282static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 283{
838fa72d
AJ
284 TCGv_i64 tmp64 = tcg_temp_new_i64();
285
286 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 287 tcg_temp_free_i32(b);
838fa72d
AJ
288 tcg_gen_shli_i64(tmp64, tmp64, 32);
289 tcg_gen_add_i64(a, tmp64, a);
290
291 tcg_temp_free_i64(tmp64);
292 return a;
293}
294
295/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 296static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
297{
298 TCGv_i64 tmp64 = tcg_temp_new_i64();
299
300 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 301 tcg_temp_free_i32(b);
838fa72d
AJ
302 tcg_gen_shli_i64(tmp64, tmp64, 32);
303 tcg_gen_sub_i64(a, tmp64, a);
304
305 tcg_temp_free_i64(tmp64);
306 return a;
3670669c
PB
307}
308
5e3f878a 309/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 310static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 311{
39d5492a
PM
312 TCGv_i32 lo = tcg_temp_new_i32();
313 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 314 TCGv_i64 ret;
5e3f878a 315
831d7fe8 316 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 317 tcg_temp_free_i32(a);
7d1b0095 318 tcg_temp_free_i32(b);
831d7fe8
RH
319
320 ret = tcg_temp_new_i64();
321 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
322 tcg_temp_free_i32(lo);
323 tcg_temp_free_i32(hi);
831d7fe8
RH
324
325 return ret;
5e3f878a
PB
326}
327
39d5492a 328static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 329{
39d5492a
PM
330 TCGv_i32 lo = tcg_temp_new_i32();
331 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 332 TCGv_i64 ret;
5e3f878a 333
831d7fe8 334 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 335 tcg_temp_free_i32(a);
7d1b0095 336 tcg_temp_free_i32(b);
831d7fe8
RH
337
338 ret = tcg_temp_new_i64();
339 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
340 tcg_temp_free_i32(lo);
341 tcg_temp_free_i32(hi);
831d7fe8
RH
342
343 return ret;
5e3f878a
PB
344}
345
8f01245e 346/* Swap low and high halfwords. */
39d5492a 347static void gen_swap_half(TCGv_i32 var)
8f01245e 348{
39d5492a 349 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
350 tcg_gen_shri_i32(tmp, var, 16);
351 tcg_gen_shli_i32(var, var, 16);
352 tcg_gen_or_i32(var, var, tmp);
7d1b0095 353 tcg_temp_free_i32(tmp);
8f01245e
PB
354}
355
b26eefb6
PB
356/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
357 tmp = (t0 ^ t1) & 0x8000;
358 t0 &= ~0x8000;
359 t1 &= ~0x8000;
360 t0 = (t0 + t1) ^ tmp;
361 */
362
39d5492a 363static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 364{
39d5492a 365 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
366 tcg_gen_xor_i32(tmp, t0, t1);
367 tcg_gen_andi_i32(tmp, tmp, 0x8000);
368 tcg_gen_andi_i32(t0, t0, ~0x8000);
369 tcg_gen_andi_i32(t1, t1, ~0x8000);
370 tcg_gen_add_i32(t0, t0, t1);
371 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
372 tcg_temp_free_i32(tmp);
373 tcg_temp_free_i32(t1);
b26eefb6
PB
374}
375
376/* Set CF to the top bit of var. */
39d5492a 377static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 378{
66c374de 379 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
380}
381
382/* Set N and Z flags from var. */
39d5492a 383static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 384{
66c374de
AJ
385 tcg_gen_mov_i32(cpu_NF, var);
386 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
387}
388
389/* T0 += T1 + CF. */
39d5492a 390static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 391{
396e467c 392 tcg_gen_add_i32(t0, t0, t1);
66c374de 393 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
394}
395
e9bb4aa9 396/* dest = T0 + T1 + CF. */
39d5492a 397static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 398{
e9bb4aa9 399 tcg_gen_add_i32(dest, t0, t1);
66c374de 400 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
401}
402
3670669c 403/* dest = T0 - T1 + CF - 1. */
39d5492a 404static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 405{
3670669c 406 tcg_gen_sub_i32(dest, t0, t1);
66c374de 407 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 408 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
409}
410
72485ec4 411/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 412static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 413{
39d5492a 414 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
415 tcg_gen_movi_i32(tmp, 0);
416 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 417 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 418 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
419 tcg_gen_xor_i32(tmp, t0, t1);
420 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
421 tcg_temp_free_i32(tmp);
422 tcg_gen_mov_i32(dest, cpu_NF);
423}
424
49b4c31e 425/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 426static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 427{
39d5492a 428 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
429 if (TCG_TARGET_HAS_add2_i32) {
430 tcg_gen_movi_i32(tmp, 0);
431 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 432 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
433 } else {
434 TCGv_i64 q0 = tcg_temp_new_i64();
435 TCGv_i64 q1 = tcg_temp_new_i64();
436 tcg_gen_extu_i32_i64(q0, t0);
437 tcg_gen_extu_i32_i64(q1, t1);
438 tcg_gen_add_i64(q0, q0, q1);
439 tcg_gen_extu_i32_i64(q1, cpu_CF);
440 tcg_gen_add_i64(q0, q0, q1);
441 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
442 tcg_temp_free_i64(q0);
443 tcg_temp_free_i64(q1);
444 }
445 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
446 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
447 tcg_gen_xor_i32(tmp, t0, t1);
448 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
449 tcg_temp_free_i32(tmp);
450 tcg_gen_mov_i32(dest, cpu_NF);
451}
452
72485ec4 453/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 454static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 455{
39d5492a 456 TCGv_i32 tmp;
72485ec4
AJ
457 tcg_gen_sub_i32(cpu_NF, t0, t1);
458 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
459 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
460 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
461 tmp = tcg_temp_new_i32();
462 tcg_gen_xor_i32(tmp, t0, t1);
463 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
464 tcg_temp_free_i32(tmp);
465 tcg_gen_mov_i32(dest, cpu_NF);
466}
467
e77f0832 468/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 469static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 470{
39d5492a 471 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
472 tcg_gen_not_i32(tmp, t1);
473 gen_adc_CC(dest, t0, tmp);
39d5492a 474 tcg_temp_free_i32(tmp);
2de68a49
RH
475}
476
365af80e 477#define GEN_SHIFT(name) \
39d5492a 478static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 479{ \
39d5492a 480 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
481 tmp1 = tcg_temp_new_i32(); \
482 tcg_gen_andi_i32(tmp1, t1, 0xff); \
483 tmp2 = tcg_const_i32(0); \
484 tmp3 = tcg_const_i32(0x1f); \
485 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
486 tcg_temp_free_i32(tmp3); \
487 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
488 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
489 tcg_temp_free_i32(tmp2); \
490 tcg_temp_free_i32(tmp1); \
491}
492GEN_SHIFT(shl)
493GEN_SHIFT(shr)
494#undef GEN_SHIFT
495
39d5492a 496static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 497{
39d5492a 498 TCGv_i32 tmp1, tmp2;
365af80e
AJ
499 tmp1 = tcg_temp_new_i32();
500 tcg_gen_andi_i32(tmp1, t1, 0xff);
501 tmp2 = tcg_const_i32(0x1f);
502 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
503 tcg_temp_free_i32(tmp2);
504 tcg_gen_sar_i32(dest, t0, tmp1);
505 tcg_temp_free_i32(tmp1);
506}
507
39d5492a 508static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 509{
39d5492a
PM
510 TCGv_i32 c0 = tcg_const_i32(0);
511 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
512 tcg_gen_neg_i32(tmp, src);
513 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
514 tcg_temp_free_i32(c0);
515 tcg_temp_free_i32(tmp);
516}
ad69471c 517
39d5492a 518static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 519{
9a119ff6 520 if (shift == 0) {
66c374de 521 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 522 } else {
66c374de
AJ
523 tcg_gen_shri_i32(cpu_CF, var, shift);
524 if (shift != 31) {
525 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
526 }
9a119ff6 527 }
9a119ff6 528}
b26eefb6 529
9a119ff6 530/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
531static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
532 int shift, int flags)
9a119ff6
PB
533{
534 switch (shiftop) {
535 case 0: /* LSL */
536 if (shift != 0) {
537 if (flags)
538 shifter_out_im(var, 32 - shift);
539 tcg_gen_shli_i32(var, var, shift);
540 }
541 break;
542 case 1: /* LSR */
543 if (shift == 0) {
544 if (flags) {
66c374de 545 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
546 }
547 tcg_gen_movi_i32(var, 0);
548 } else {
549 if (flags)
550 shifter_out_im(var, shift - 1);
551 tcg_gen_shri_i32(var, var, shift);
552 }
553 break;
554 case 2: /* ASR */
555 if (shift == 0)
556 shift = 32;
557 if (flags)
558 shifter_out_im(var, shift - 1);
559 if (shift == 32)
560 shift = 31;
561 tcg_gen_sari_i32(var, var, shift);
562 break;
563 case 3: /* ROR/RRX */
564 if (shift != 0) {
565 if (flags)
566 shifter_out_im(var, shift - 1);
f669df27 567 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 568 } else {
39d5492a 569 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 570 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
571 if (flags)
572 shifter_out_im(var, 0);
573 tcg_gen_shri_i32(var, var, 1);
b26eefb6 574 tcg_gen_or_i32(var, var, tmp);
7d1b0095 575 tcg_temp_free_i32(tmp);
b26eefb6
PB
576 }
577 }
578};
579
39d5492a
PM
580static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
581 TCGv_i32 shift, int flags)
8984bd2e
PB
582{
583 if (flags) {
584 switch (shiftop) {
9ef39277
BS
585 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
586 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
587 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
588 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
589 }
590 } else {
591 switch (shiftop) {
365af80e
AJ
592 case 0:
593 gen_shl(var, var, shift);
594 break;
595 case 1:
596 gen_shr(var, var, shift);
597 break;
598 case 2:
599 gen_sar(var, var, shift);
600 break;
f669df27
AJ
601 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
602 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
603 }
604 }
7d1b0095 605 tcg_temp_free_i32(shift);
8984bd2e
PB
606}
607
6ddbc6e4
PB
608#define PAS_OP(pfx) \
609 switch (op2) { \
610 case 0: gen_pas_helper(glue(pfx,add16)); break; \
611 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
612 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
613 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
614 case 4: gen_pas_helper(glue(pfx,add8)); break; \
615 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
616 }
39d5492a 617static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 618{
a7812ae4 619 TCGv_ptr tmp;
6ddbc6e4
PB
620
621 switch (op1) {
622#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
623 case 1:
a7812ae4 624 tmp = tcg_temp_new_ptr();
0ecb72a5 625 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 626 PAS_OP(s)
b75263d6 627 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
628 break;
629 case 5:
a7812ae4 630 tmp = tcg_temp_new_ptr();
0ecb72a5 631 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 632 PAS_OP(u)
b75263d6 633 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
634 break;
635#undef gen_pas_helper
636#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
637 case 2:
638 PAS_OP(q);
639 break;
640 case 3:
641 PAS_OP(sh);
642 break;
643 case 6:
644 PAS_OP(uq);
645 break;
646 case 7:
647 PAS_OP(uh);
648 break;
649#undef gen_pas_helper
650 }
651}
9ee6e8bb
PB
652#undef PAS_OP
653
6ddbc6e4
PB
654/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
655#define PAS_OP(pfx) \
ed89a2f1 656 switch (op1) { \
6ddbc6e4
PB
657 case 0: gen_pas_helper(glue(pfx,add8)); break; \
658 case 1: gen_pas_helper(glue(pfx,add16)); break; \
659 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
660 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
661 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
662 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
663 }
39d5492a 664static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 665{
a7812ae4 666 TCGv_ptr tmp;
6ddbc6e4 667
ed89a2f1 668 switch (op2) {
6ddbc6e4
PB
669#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
670 case 0:
a7812ae4 671 tmp = tcg_temp_new_ptr();
0ecb72a5 672 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 673 PAS_OP(s)
b75263d6 674 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
675 break;
676 case 4:
a7812ae4 677 tmp = tcg_temp_new_ptr();
0ecb72a5 678 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 679 PAS_OP(u)
b75263d6 680 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
681 break;
682#undef gen_pas_helper
683#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
684 case 1:
685 PAS_OP(q);
686 break;
687 case 2:
688 PAS_OP(sh);
689 break;
690 case 5:
691 PAS_OP(uq);
692 break;
693 case 6:
694 PAS_OP(uh);
695 break;
696#undef gen_pas_helper
697 }
698}
9ee6e8bb
PB
699#undef PAS_OP
700
d9ba4830
PB
701static void gen_test_cc(int cc, int label)
702{
39d5492a 703 TCGv_i32 tmp;
d9ba4830
PB
704 int inv;
705
d9ba4830
PB
706 switch (cc) {
707 case 0: /* eq: Z */
66c374de 708 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
709 break;
710 case 1: /* ne: !Z */
66c374de 711 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
712 break;
713 case 2: /* cs: C */
66c374de 714 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
715 break;
716 case 3: /* cc: !C */
66c374de 717 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
718 break;
719 case 4: /* mi: N */
66c374de 720 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
721 break;
722 case 5: /* pl: !N */
66c374de 723 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
724 break;
725 case 6: /* vs: V */
66c374de 726 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
727 break;
728 case 7: /* vc: !V */
66c374de 729 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
730 break;
731 case 8: /* hi: C && !Z */
732 inv = gen_new_label();
66c374de
AJ
733 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
734 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
735 gen_set_label(inv);
736 break;
737 case 9: /* ls: !C || Z */
66c374de
AJ
738 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
739 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
740 break;
741 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 744 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 745 tcg_temp_free_i32(tmp);
d9ba4830
PB
746 break;
747 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
748 tmp = tcg_temp_new_i32();
749 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 750 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 751 tcg_temp_free_i32(tmp);
d9ba4830
PB
752 break;
753 case 12: /* gt: !Z && N == V */
754 inv = gen_new_label();
66c374de
AJ
755 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
756 tmp = tcg_temp_new_i32();
757 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 758 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 759 tcg_temp_free_i32(tmp);
d9ba4830
PB
760 gen_set_label(inv);
761 break;
762 case 13: /* le: Z || N != V */
66c374de
AJ
763 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
764 tmp = tcg_temp_new_i32();
765 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 766 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 767 tcg_temp_free_i32(tmp);
d9ba4830
PB
768 break;
769 default:
770 fprintf(stderr, "Bad condition code 0x%x\n", cc);
771 abort();
772 }
d9ba4830 773}
2c0262af 774
/* For each of the 16 data-processing opcodes: 1 if it is a logical op
   (sets N/Z only, with C from the shifter), 0 if arithmetic. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 793
d9ba4830
PB
794/* Set PC and Thumb state from an immediate address. */
795static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 796{
39d5492a 797 TCGv_i32 tmp;
99c475ab 798
b26eefb6 799 s->is_jmp = DISAS_UPDATE;
d9ba4830 800 if (s->thumb != (addr & 1)) {
7d1b0095 801 tmp = tcg_temp_new_i32();
d9ba4830 802 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 803 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 804 tcg_temp_free_i32(tmp);
d9ba4830 805 }
155c3eac 806 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
807}
808
809/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 810static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 811{
d9ba4830 812 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
813 tcg_gen_andi_i32(cpu_R[15], var, ~1);
814 tcg_gen_andi_i32(var, var, 1);
815 store_cpu_field(var, thumb);
d9ba4830
PB
816}
817
21aeb343
JR
818/* Variant of store_reg which uses branch&exchange logic when storing
819 to r15 in ARM architecture v7 and above. The source must be a temporary
820 and will be marked as dead. */
0ecb72a5 821static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 822 int reg, TCGv_i32 var)
21aeb343
JR
823{
824 if (reg == 15 && ENABLE_ARCH_7) {
825 gen_bx(s, var);
826 } else {
827 store_reg(s, reg, var);
828 }
829}
830
be5e7a76
DES
831/* Variant of store_reg which uses branch&exchange logic when storing
832 * to r15 in ARM architecture v5T and above. This is used for storing
833 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
834 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 835static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 836 int reg, TCGv_i32 var)
be5e7a76
DES
837{
838 if (reg == 15 && ENABLE_ARCH_5) {
839 gen_bx(s, var);
840 } else {
841 store_reg(s, reg, var);
842 }
843}
844
5e3f878a
PB
845static inline void gen_set_pc_im(uint32_t val)
846{
155c3eac 847 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
848}
849
b5ff1b31
FB
850/* Force a TB lookup after an instruction that changes the CPU state. */
851static inline void gen_lookup_tb(DisasContext *s)
852{
a6445c52 853 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
854 s->is_jmp = DISAS_UPDATE;
855}
856
b0109805 857static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 858 TCGv_i32 var)
2c0262af 859{
1e8d4eec 860 int val, rm, shift, shiftop;
39d5492a 861 TCGv_i32 offset;
2c0262af
FB
862
863 if (!(insn & (1 << 25))) {
864 /* immediate */
865 val = insn & 0xfff;
866 if (!(insn & (1 << 23)))
867 val = -val;
537730b9 868 if (val != 0)
b0109805 869 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
870 } else {
871 /* shift/register */
872 rm = (insn) & 0xf;
873 shift = (insn >> 7) & 0x1f;
1e8d4eec 874 shiftop = (insn >> 5) & 3;
b26eefb6 875 offset = load_reg(s, rm);
9a119ff6 876 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 877 if (!(insn & (1 << 23)))
b0109805 878 tcg_gen_sub_i32(var, var, offset);
2c0262af 879 else
b0109805 880 tcg_gen_add_i32(var, var, offset);
7d1b0095 881 tcg_temp_free_i32(offset);
2c0262af
FB
882 }
883}
884
191f9a93 885static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 886 int extra, TCGv_i32 var)
2c0262af
FB
887{
888 int val, rm;
39d5492a 889 TCGv_i32 offset;
3b46e624 890
2c0262af
FB
891 if (insn & (1 << 22)) {
892 /* immediate */
893 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
894 if (!(insn & (1 << 23)))
895 val = -val;
18acad92 896 val += extra;
537730b9 897 if (val != 0)
b0109805 898 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
899 } else {
900 /* register */
191f9a93 901 if (extra)
b0109805 902 tcg_gen_addi_i32(var, var, extra);
2c0262af 903 rm = (insn) & 0xf;
b26eefb6 904 offset = load_reg(s, rm);
2c0262af 905 if (!(insn & (1 << 23)))
b0109805 906 tcg_gen_sub_i32(var, var, offset);
2c0262af 907 else
b0109805 908 tcg_gen_add_i32(var, var, offset);
7d1b0095 909 tcg_temp_free_i32(offset);
2c0262af
FB
910 }
911}
912
5aaebd13
PM
913static TCGv_ptr get_fpstatus_ptr(int neon)
914{
915 TCGv_ptr statusptr = tcg_temp_new_ptr();
916 int offset;
917 if (neon) {
0ecb72a5 918 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 919 } else {
0ecb72a5 920 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
921 }
922 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
923 return statusptr;
924}
925
4373f3ce
PB
926#define VFP_OP2(name) \
927static inline void gen_vfp_##name(int dp) \
928{ \
ae1857ec
PM
929 TCGv_ptr fpst = get_fpstatus_ptr(0); \
930 if (dp) { \
931 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
932 } else { \
933 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
934 } \
935 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
936}
937
4373f3ce
PB
938VFP_OP2(add)
939VFP_OP2(sub)
940VFP_OP2(mul)
941VFP_OP2(div)
942
943#undef VFP_OP2
944
605a6aed
PM
945static inline void gen_vfp_F1_mul(int dp)
946{
947 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 948 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 949 if (dp) {
ae1857ec 950 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 951 } else {
ae1857ec 952 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 953 }
ae1857ec 954 tcg_temp_free_ptr(fpst);
605a6aed
PM
955}
956
957static inline void gen_vfp_F1_neg(int dp)
958{
959 /* Like gen_vfp_neg() but put result in F1 */
960 if (dp) {
961 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
962 } else {
963 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
964 }
965}
966
4373f3ce
PB
967static inline void gen_vfp_abs(int dp)
968{
969 if (dp)
970 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
971 else
972 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
973}
974
975static inline void gen_vfp_neg(int dp)
976{
977 if (dp)
978 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
979 else
980 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
981}
982
983static inline void gen_vfp_sqrt(int dp)
984{
985 if (dp)
986 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
987 else
988 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
989}
990
991static inline void gen_vfp_cmp(int dp)
992{
993 if (dp)
994 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
995 else
996 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
997}
998
999static inline void gen_vfp_cmpe(int dp)
1000{
1001 if (dp)
1002 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1003 else
1004 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1005}
1006
1007static inline void gen_vfp_F1_ld0(int dp)
1008{
1009 if (dp)
5b340b51 1010 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1011 else
5b340b51 1012 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1013}
/* VFP_GEN_ITOF(name) defines gen_vfp_<name>(dp, neon): int-to-float
 * conversion.  The integer source is always the 32-bit cpu_F0s; the
 * result goes to cpu_F0d or cpu_F0s depending on dp.  'neon' selects
 * which float_status to use (Neon standard vs VFP).
 */
#define VFP_GEN_ITOF(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
/* VFP_GEN_FTOI(name) defines gen_vfp_<name>(dp, neon): float-to-int
 * conversion.  The source is cpu_F0d or cpu_F0s per dp; the 32-bit
 * integer result always lands in cpu_F0s (note the dp branch writes
 * cpu_F0s from cpu_F0d).  The "z" variants round toward zero.
 */
#define VFP_GEN_FTOI(name)                                            \
static inline void gen_vfp_##name(int dp, int neon)                   \
{                                                                     \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                      \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr);        \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr);        \
    }                                                                 \
    tcg_temp_free_ptr(statusptr);                                     \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
/* VFP_GEN_FIX(name) defines gen_vfp_<name>(dp, shift, neon):
 * fixed-point <-> float conversions where 'shift' is the number of
 * fraction bits, passed to the helper as an i32 constant.  Covers
 * to-fixed (tosh/tosl/touh/toul) and from-fixed (shto/slto/uhto/ulto)
 * for 16- and 32-bit signed/unsigned fixed-point formats.
 */
#define VFP_GEN_FIX(name)                                                 \
static inline void gen_vfp_##name(int dp, int shift, int neon)            \
{                                                                         \
    TCGv_i32 tmp_shift = tcg_const_i32(shift);                            \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);                          \
    if (dp) {                                                             \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else {                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    }                                                                     \
    tcg_temp_free_i32(tmp_shift);                                         \
    tcg_temp_free_ptr(statusptr);                                         \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1071
39d5492a 1072static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31
FB
1073{
1074 if (dp)
312eea9f 1075 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1076 else
312eea9f 1077 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1078}
1079
39d5492a 1080static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31
FB
1081{
1082 if (dp)
312eea9f 1083 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1084 else
312eea9f 1085 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1086}
1087
8e96005d
FB
1088static inline long
1089vfp_reg_offset (int dp, int reg)
1090{
1091 if (dp)
1092 return offsetof(CPUARMState, vfp.regs[reg]);
1093 else if (reg & 1) {
1094 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1095 + offsetof(CPU_DoubleU, l.upper);
1096 } else {
1097 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1098 + offsetof(CPU_DoubleU, l.lower);
1099 }
1100}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  Each D register
   is addressed as two consecutive single-precision slots. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1111
39d5492a 1112static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1113{
39d5492a 1114 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1115 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1116 return tmp;
1117}
/* Store 'var' into 32-bit lane 'pass' of NEON register 'reg'.
 * Consumes (frees) the temp. */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
/* Load the full 64 bits of NEON/VFP D register 'reg' into 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
/* Store 'var' into the full 64 bits of NEON/VFP D register 'reg'. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
/* At the TCG level VFP "float" loads/stores are plain 32/64-bit
 * integer moves; alias them so code below can say ld_f32/st_f64. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1139
b7bcbe95
FB
1140static inline void gen_mov_F0_vreg(int dp, int reg)
1141{
1142 if (dp)
4373f3ce 1143 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1144 else
4373f3ce 1145 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1146}
1147
1148static inline void gen_mov_F1_vreg(int dp, int reg)
1149{
1150 if (dp)
4373f3ce 1151 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1152 else
4373f3ce 1153 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1154}
1155
1156static inline void gen_mov_vreg_F0(int dp, int reg)
1157{
1158 if (dp)
4373f3ce 1159 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1160 else
4373f3ce 1161 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1162}
/* Bit 20 of a coprocessor instruction encoding: set when the transfer
 * is from coprocessor to core (the load/read direction, e.g. the
 * TMRRC/WLDR paths below), clear for the store/write direction. */
#define ARM_CP_RW_BIT (1 << 20)
/* Load 64-bit iwMMXt data register wRn into 'var'. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
/* Store 'var' into 64-bit iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
/* Load 32-bit iwMMXt control register wCx into a fresh temp, which
 * the caller owns. */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
/* Store 'var' into iwMMXt control register wCx.  Consumes (frees)
 * the temp. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
/* wRn = M0 (write back the 64-bit working register). */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
/* M0 = wRn (fetch an operand into the 64-bit working register). */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
/* M0 |= wRn (64-bit bitwise OR). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* M0 &= wRn (64-bit bitwise AND). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* M0 ^= wRn (64-bit bitwise XOR). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* Macro family defining gen_op_iwmmxt_* emitters:
 *
 * IWMMXT_OP(name):          M0 = helper_iwmmxt_<name>(M0, wRn)
 * IWMMXT_OP_ENV(name):      as above but the helper also receives
 *                           cpu_env (for ops that touch CPU state,
 *                           e.g. flag/saturation updates).
 * IWMMXT_OP_ENV_SIZE(name): instantiate the _ENV form for the b/w/l
 *                           (8/16/32-bit element) variants.
 * IWMMXT_OP_ENV1(name):     unary form, M0 = helper(env, M0).
 */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
/* Multiply / multiply-accumulate / sum-of-absolute-differences ops
 * that need no CPU state. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Pack/unpack, compare, min/max, add/sub: helpers take cpu_env. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1298
e677137d
PB
1299static void gen_op_iwmmxt_set_mup(void)
1300{
39d5492a 1301 TCGv_i32 tmp;
e677137d
PB
1302 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1303 tcg_gen_ori_i32(tmp, tmp, 2);
1304 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1305}
1306
1307static void gen_op_iwmmxt_set_cup(void)
1308{
39d5492a 1309 TCGv_i32 tmp;
e677137d
PB
1310 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1311 tcg_gen_ori_i32(tmp, tmp, 1);
1312 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1313}
/* Recompute the N/Z condition bits of wCASF from the 64-bit value in
 * M0 (helper derives them; result replaces wCASF). */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
/* M0 += zero-extended low 32 bits of wRn (word-wise accumulate). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
/* Decode the addressing mode of an iwMMXt load/store and compute the
 * effective address into 'dest'.  Rn is insn[19:16]; the immediate is
 * insn[7:0], scaled by 4 when bit 8 is set (shift count comes from
 * ((insn >> 7) & 2), i.e. 0 or 2).  Bit 24 (P) selects pre-indexed,
 * bit 21 (W) writeback/post-indexed, bit 23 (U) add vs subtract.
 * Returns nonzero for an invalid encoding (caller treats as UNDEF).
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed: address = Rn +/- offset, optional writeback. */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed: address = Rn, then Rn +/- offset written back. */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        /* P=0, W=0, U=0 is not a valid form.
         * NOTE(review): 'tmp' is not freed on this path (and on the
         * P=0,W=0,U=1 fall-through below 'dest' is never written) —
         * presumably benign because temps are reclaimed per-TB and
         * callers UNDEF on nonzero return; confirm against callers. */
        return 1;
    return 0;
}
/* Fetch the shift amount for an iwMMXt shift instruction into 'dest',
 * masked with 'mask'.  When bit 8 of insn is set the amount comes from
 * control register wCGRn (only wCGR0..wCGR3 are legal); otherwise from
 * the low 32 bits of data register wRd (insn[3:0]).  Returns nonzero
 * on an illegal register (caller treats as UNDEF).
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        /* Only the low 32 bits of the 64-bit wRd are used. */
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1385
a1c7273b 1386/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1387 (ie. an undefined instruction). */
0ecb72a5 1388static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1389{
1390 int rd, wrd;
1391 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1392 TCGv_i32 addr;
1393 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1394
1395 if ((insn & 0x0e000e00) == 0x0c000000) {
1396 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1397 wrd = insn & 0xf;
1398 rdlo = (insn >> 12) & 0xf;
1399 rdhi = (insn >> 16) & 0xf;
1400 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1401 iwmmxt_load_reg(cpu_V0, wrd);
1402 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1403 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1404 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1405 } else { /* TMCRR */
da6b5335
FN
1406 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1407 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1408 gen_op_iwmmxt_set_mup();
1409 }
1410 return 0;
1411 }
1412
1413 wrd = (insn >> 12) & 0xf;
7d1b0095 1414 addr = tcg_temp_new_i32();
da6b5335 1415 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1416 tcg_temp_free_i32(addr);
18c9b560 1417 return 1;
da6b5335 1418 }
18c9b560
AZ
1419 if (insn & ARM_CP_RW_BIT) {
1420 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1421 tmp = tcg_temp_new_i32();
da6b5335
FN
1422 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1423 iwmmxt_store_creg(wrd, tmp);
18c9b560 1424 } else {
e677137d
PB
1425 i = 1;
1426 if (insn & (1 << 8)) {
1427 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1428 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1429 i = 0;
1430 } else { /* WLDRW wRd */
29531141
PM
1431 tmp = tcg_temp_new_i32();
1432 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1433 }
1434 } else {
29531141 1435 tmp = tcg_temp_new_i32();
e677137d 1436 if (insn & (1 << 22)) { /* WLDRH */
29531141 1437 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
e677137d 1438 } else { /* WLDRB */
29531141 1439 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1440 }
1441 }
1442 if (i) {
1443 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1444 tcg_temp_free_i32(tmp);
e677137d 1445 }
18c9b560
AZ
1446 gen_op_iwmmxt_movq_wRn_M0(wrd);
1447 }
1448 } else {
1449 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1450 tmp = iwmmxt_load_creg(wrd);
29531141 1451 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1452 } else {
1453 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1454 tmp = tcg_temp_new_i32();
e677137d
PB
1455 if (insn & (1 << 8)) {
1456 if (insn & (1 << 22)) { /* WSTRD */
da6b5335 1457 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1458 } else { /* WSTRW wRd */
1459 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
29531141 1460 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
e677137d
PB
1461 }
1462 } else {
1463 if (insn & (1 << 22)) { /* WSTRH */
1464 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
29531141 1465 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
e677137d
PB
1466 } else { /* WSTRB */
1467 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
29531141 1468 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
e677137d
PB
1469 }
1470 }
18c9b560 1471 }
29531141 1472 tcg_temp_free_i32(tmp);
18c9b560 1473 }
7d1b0095 1474 tcg_temp_free_i32(addr);
18c9b560
AZ
1475 return 0;
1476 }
1477
1478 if ((insn & 0x0f000000) != 0x0e000000)
1479 return 1;
1480
1481 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1482 case 0x000: /* WOR */
1483 wrd = (insn >> 12) & 0xf;
1484 rd0 = (insn >> 0) & 0xf;
1485 rd1 = (insn >> 16) & 0xf;
1486 gen_op_iwmmxt_movq_M0_wRn(rd0);
1487 gen_op_iwmmxt_orq_M0_wRn(rd1);
1488 gen_op_iwmmxt_setpsr_nz();
1489 gen_op_iwmmxt_movq_wRn_M0(wrd);
1490 gen_op_iwmmxt_set_mup();
1491 gen_op_iwmmxt_set_cup();
1492 break;
1493 case 0x011: /* TMCR */
1494 if (insn & 0xf)
1495 return 1;
1496 rd = (insn >> 12) & 0xf;
1497 wrd = (insn >> 16) & 0xf;
1498 switch (wrd) {
1499 case ARM_IWMMXT_wCID:
1500 case ARM_IWMMXT_wCASF:
1501 break;
1502 case ARM_IWMMXT_wCon:
1503 gen_op_iwmmxt_set_cup();
1504 /* Fall through. */
1505 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1506 tmp = iwmmxt_load_creg(wrd);
1507 tmp2 = load_reg(s, rd);
f669df27 1508 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1509 tcg_temp_free_i32(tmp2);
da6b5335 1510 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1511 break;
1512 case ARM_IWMMXT_wCGR0:
1513 case ARM_IWMMXT_wCGR1:
1514 case ARM_IWMMXT_wCGR2:
1515 case ARM_IWMMXT_wCGR3:
1516 gen_op_iwmmxt_set_cup();
da6b5335
FN
1517 tmp = load_reg(s, rd);
1518 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1519 break;
1520 default:
1521 return 1;
1522 }
1523 break;
1524 case 0x100: /* WXOR */
1525 wrd = (insn >> 12) & 0xf;
1526 rd0 = (insn >> 0) & 0xf;
1527 rd1 = (insn >> 16) & 0xf;
1528 gen_op_iwmmxt_movq_M0_wRn(rd0);
1529 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1530 gen_op_iwmmxt_setpsr_nz();
1531 gen_op_iwmmxt_movq_wRn_M0(wrd);
1532 gen_op_iwmmxt_set_mup();
1533 gen_op_iwmmxt_set_cup();
1534 break;
1535 case 0x111: /* TMRC */
1536 if (insn & 0xf)
1537 return 1;
1538 rd = (insn >> 12) & 0xf;
1539 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1540 tmp = iwmmxt_load_creg(wrd);
1541 store_reg(s, rd, tmp);
18c9b560
AZ
1542 break;
1543 case 0x300: /* WANDN */
1544 wrd = (insn >> 12) & 0xf;
1545 rd0 = (insn >> 0) & 0xf;
1546 rd1 = (insn >> 16) & 0xf;
1547 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1548 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1549 gen_op_iwmmxt_andq_M0_wRn(rd1);
1550 gen_op_iwmmxt_setpsr_nz();
1551 gen_op_iwmmxt_movq_wRn_M0(wrd);
1552 gen_op_iwmmxt_set_mup();
1553 gen_op_iwmmxt_set_cup();
1554 break;
1555 case 0x200: /* WAND */
1556 wrd = (insn >> 12) & 0xf;
1557 rd0 = (insn >> 0) & 0xf;
1558 rd1 = (insn >> 16) & 0xf;
1559 gen_op_iwmmxt_movq_M0_wRn(rd0);
1560 gen_op_iwmmxt_andq_M0_wRn(rd1);
1561 gen_op_iwmmxt_setpsr_nz();
1562 gen_op_iwmmxt_movq_wRn_M0(wrd);
1563 gen_op_iwmmxt_set_mup();
1564 gen_op_iwmmxt_set_cup();
1565 break;
1566 case 0x810: case 0xa10: /* WMADD */
1567 wrd = (insn >> 12) & 0xf;
1568 rd0 = (insn >> 0) & 0xf;
1569 rd1 = (insn >> 16) & 0xf;
1570 gen_op_iwmmxt_movq_M0_wRn(rd0);
1571 if (insn & (1 << 21))
1572 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1573 else
1574 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1575 gen_op_iwmmxt_movq_wRn_M0(wrd);
1576 gen_op_iwmmxt_set_mup();
1577 break;
1578 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1579 wrd = (insn >> 12) & 0xf;
1580 rd0 = (insn >> 16) & 0xf;
1581 rd1 = (insn >> 0) & 0xf;
1582 gen_op_iwmmxt_movq_M0_wRn(rd0);
1583 switch ((insn >> 22) & 3) {
1584 case 0:
1585 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1586 break;
1587 case 1:
1588 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1589 break;
1590 case 2:
1591 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1592 break;
1593 case 3:
1594 return 1;
1595 }
1596 gen_op_iwmmxt_movq_wRn_M0(wrd);
1597 gen_op_iwmmxt_set_mup();
1598 gen_op_iwmmxt_set_cup();
1599 break;
1600 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1601 wrd = (insn >> 12) & 0xf;
1602 rd0 = (insn >> 16) & 0xf;
1603 rd1 = (insn >> 0) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0);
1605 switch ((insn >> 22) & 3) {
1606 case 0:
1607 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1608 break;
1609 case 1:
1610 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1611 break;
1612 case 2:
1613 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1614 break;
1615 case 3:
1616 return 1;
1617 }
1618 gen_op_iwmmxt_movq_wRn_M0(wrd);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1621 break;
1622 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1623 wrd = (insn >> 12) & 0xf;
1624 rd0 = (insn >> 16) & 0xf;
1625 rd1 = (insn >> 0) & 0xf;
1626 gen_op_iwmmxt_movq_M0_wRn(rd0);
1627 if (insn & (1 << 22))
1628 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1629 else
1630 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1631 if (!(insn & (1 << 20)))
1632 gen_op_iwmmxt_addl_M0_wRn(wrd);
1633 gen_op_iwmmxt_movq_wRn_M0(wrd);
1634 gen_op_iwmmxt_set_mup();
1635 break;
1636 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1637 wrd = (insn >> 12) & 0xf;
1638 rd0 = (insn >> 16) & 0xf;
1639 rd1 = (insn >> 0) & 0xf;
1640 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1641 if (insn & (1 << 21)) {
1642 if (insn & (1 << 20))
1643 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1644 else
1645 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1646 } else {
1647 if (insn & (1 << 20))
1648 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1649 else
1650 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1651 }
18c9b560
AZ
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 break;
1655 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1656 wrd = (insn >> 12) & 0xf;
1657 rd0 = (insn >> 16) & 0xf;
1658 rd1 = (insn >> 0) & 0xf;
1659 gen_op_iwmmxt_movq_M0_wRn(rd0);
1660 if (insn & (1 << 21))
1661 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1662 else
1663 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1664 if (!(insn & (1 << 20))) {
e677137d
PB
1665 iwmmxt_load_reg(cpu_V1, wrd);
1666 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 16) & 0xf;
1674 rd1 = (insn >> 0) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 switch ((insn >> 22) & 3) {
1677 case 0:
1678 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1679 break;
1680 case 1:
1681 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1682 break;
1683 case 2:
1684 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1685 break;
1686 case 3:
1687 return 1;
1688 }
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 gen_op_iwmmxt_set_cup();
1692 break;
1693 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1694 wrd = (insn >> 12) & 0xf;
1695 rd0 = (insn >> 16) & 0xf;
1696 rd1 = (insn >> 0) & 0xf;
1697 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1698 if (insn & (1 << 22)) {
1699 if (insn & (1 << 20))
1700 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1701 else
1702 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1703 } else {
1704 if (insn & (1 << 20))
1705 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1706 else
1707 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1708 }
18c9b560
AZ
1709 gen_op_iwmmxt_movq_wRn_M0(wrd);
1710 gen_op_iwmmxt_set_mup();
1711 gen_op_iwmmxt_set_cup();
1712 break;
1713 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1714 wrd = (insn >> 12) & 0xf;
1715 rd0 = (insn >> 16) & 0xf;
1716 rd1 = (insn >> 0) & 0xf;
1717 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1718 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1719 tcg_gen_andi_i32(tmp, tmp, 7);
1720 iwmmxt_load_reg(cpu_V1, rd1);
1721 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1722 tcg_temp_free_i32(tmp);
18c9b560
AZ
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1727 if (((insn >> 6) & 3) == 3)
1728 return 1;
18c9b560
AZ
1729 rd = (insn >> 12) & 0xf;
1730 wrd = (insn >> 16) & 0xf;
da6b5335 1731 tmp = load_reg(s, rd);
18c9b560
AZ
1732 gen_op_iwmmxt_movq_M0_wRn(wrd);
1733 switch ((insn >> 6) & 3) {
1734 case 0:
da6b5335
FN
1735 tmp2 = tcg_const_i32(0xff);
1736 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1737 break;
1738 case 1:
da6b5335
FN
1739 tmp2 = tcg_const_i32(0xffff);
1740 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1741 break;
1742 case 2:
da6b5335
FN
1743 tmp2 = tcg_const_i32(0xffffffff);
1744 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1745 break;
da6b5335 1746 default:
39d5492a
PM
1747 TCGV_UNUSED_I32(tmp2);
1748 TCGV_UNUSED_I32(tmp3);
18c9b560 1749 }
da6b5335 1750 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1751 tcg_temp_free_i32(tmp3);
1752 tcg_temp_free_i32(tmp2);
7d1b0095 1753 tcg_temp_free_i32(tmp);
18c9b560
AZ
1754 gen_op_iwmmxt_movq_wRn_M0(wrd);
1755 gen_op_iwmmxt_set_mup();
1756 break;
1757 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1758 rd = (insn >> 12) & 0xf;
1759 wrd = (insn >> 16) & 0xf;
da6b5335 1760 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1761 return 1;
1762 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1763 tmp = tcg_temp_new_i32();
18c9b560
AZ
1764 switch ((insn >> 22) & 3) {
1765 case 0:
da6b5335
FN
1766 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1767 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1768 if (insn & 8) {
1769 tcg_gen_ext8s_i32(tmp, tmp);
1770 } else {
1771 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1772 }
1773 break;
1774 case 1:
da6b5335
FN
1775 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1776 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1777 if (insn & 8) {
1778 tcg_gen_ext16s_i32(tmp, tmp);
1779 } else {
1780 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1781 }
1782 break;
1783 case 2:
da6b5335
FN
1784 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1785 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1786 break;
18c9b560 1787 }
da6b5335 1788 store_reg(s, rd, tmp);
18c9b560
AZ
1789 break;
1790 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1791 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1792 return 1;
da6b5335 1793 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1794 switch ((insn >> 22) & 3) {
1795 case 0:
da6b5335 1796 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1797 break;
1798 case 1:
da6b5335 1799 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1800 break;
1801 case 2:
da6b5335 1802 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1803 break;
18c9b560 1804 }
da6b5335
FN
1805 tcg_gen_shli_i32(tmp, tmp, 28);
1806 gen_set_nzcv(tmp);
7d1b0095 1807 tcg_temp_free_i32(tmp);
18c9b560
AZ
1808 break;
1809 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1810 if (((insn >> 6) & 3) == 3)
1811 return 1;
18c9b560
AZ
1812 rd = (insn >> 12) & 0xf;
1813 wrd = (insn >> 16) & 0xf;
da6b5335 1814 tmp = load_reg(s, rd);
18c9b560
AZ
1815 switch ((insn >> 6) & 3) {
1816 case 0:
da6b5335 1817 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1818 break;
1819 case 1:
da6b5335 1820 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1821 break;
1822 case 2:
da6b5335 1823 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1824 break;
18c9b560 1825 }
7d1b0095 1826 tcg_temp_free_i32(tmp);
18c9b560
AZ
1827 gen_op_iwmmxt_movq_wRn_M0(wrd);
1828 gen_op_iwmmxt_set_mup();
1829 break;
1830 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1831 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1832 return 1;
da6b5335 1833 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1834 tmp2 = tcg_temp_new_i32();
da6b5335 1835 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1836 switch ((insn >> 22) & 3) {
1837 case 0:
1838 for (i = 0; i < 7; i ++) {
da6b5335
FN
1839 tcg_gen_shli_i32(tmp2, tmp2, 4);
1840 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1841 }
1842 break;
1843 case 1:
1844 for (i = 0; i < 3; i ++) {
da6b5335
FN
1845 tcg_gen_shli_i32(tmp2, tmp2, 8);
1846 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1847 }
1848 break;
1849 case 2:
da6b5335
FN
1850 tcg_gen_shli_i32(tmp2, tmp2, 16);
1851 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1852 break;
18c9b560 1853 }
da6b5335 1854 gen_set_nzcv(tmp);
7d1b0095
PM
1855 tcg_temp_free_i32(tmp2);
1856 tcg_temp_free_i32(tmp);
18c9b560
AZ
1857 break;
1858 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1859 wrd = (insn >> 12) & 0xf;
1860 rd0 = (insn >> 16) & 0xf;
1861 gen_op_iwmmxt_movq_M0_wRn(rd0);
1862 switch ((insn >> 22) & 3) {
1863 case 0:
e677137d 1864 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1865 break;
1866 case 1:
e677137d 1867 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1868 break;
1869 case 2:
e677137d 1870 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1871 break;
1872 case 3:
1873 return 1;
1874 }
1875 gen_op_iwmmxt_movq_wRn_M0(wrd);
1876 gen_op_iwmmxt_set_mup();
1877 break;
1878 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1879 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1880 return 1;
da6b5335 1881 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1882 tmp2 = tcg_temp_new_i32();
da6b5335 1883 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1884 switch ((insn >> 22) & 3) {
1885 case 0:
1886 for (i = 0; i < 7; i ++) {
da6b5335
FN
1887 tcg_gen_shli_i32(tmp2, tmp2, 4);
1888 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1889 }
1890 break;
1891 case 1:
1892 for (i = 0; i < 3; i ++) {
da6b5335
FN
1893 tcg_gen_shli_i32(tmp2, tmp2, 8);
1894 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1895 }
1896 break;
1897 case 2:
da6b5335
FN
1898 tcg_gen_shli_i32(tmp2, tmp2, 16);
1899 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1900 break;
18c9b560 1901 }
da6b5335 1902 gen_set_nzcv(tmp);
7d1b0095
PM
1903 tcg_temp_free_i32(tmp2);
1904 tcg_temp_free_i32(tmp);
18c9b560
AZ
1905 break;
1906 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1907 rd = (insn >> 12) & 0xf;
1908 rd0 = (insn >> 16) & 0xf;
da6b5335 1909 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1910 return 1;
1911 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1912 tmp = tcg_temp_new_i32();
18c9b560
AZ
1913 switch ((insn >> 22) & 3) {
1914 case 0:
da6b5335 1915 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1916 break;
1917 case 1:
da6b5335 1918 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1919 break;
1920 case 2:
da6b5335 1921 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1922 break;
18c9b560 1923 }
da6b5335 1924 store_reg(s, rd, tmp);
18c9b560
AZ
1925 break;
1926 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1927 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1928 wrd = (insn >> 12) & 0xf;
1929 rd0 = (insn >> 16) & 0xf;
1930 rd1 = (insn >> 0) & 0xf;
1931 gen_op_iwmmxt_movq_M0_wRn(rd0);
1932 switch ((insn >> 22) & 3) {
1933 case 0:
1934 if (insn & (1 << 21))
1935 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1936 else
1937 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1938 break;
1939 case 1:
1940 if (insn & (1 << 21))
1941 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1942 else
1943 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1944 break;
1945 case 2:
1946 if (insn & (1 << 21))
1947 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1948 else
1949 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1950 break;
1951 case 3:
1952 return 1;
1953 }
1954 gen_op_iwmmxt_movq_wRn_M0(wrd);
1955 gen_op_iwmmxt_set_mup();
1956 gen_op_iwmmxt_set_cup();
1957 break;
1958 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1959 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1960 wrd = (insn >> 12) & 0xf;
1961 rd0 = (insn >> 16) & 0xf;
1962 gen_op_iwmmxt_movq_M0_wRn(rd0);
1963 switch ((insn >> 22) & 3) {
1964 case 0:
1965 if (insn & (1 << 21))
1966 gen_op_iwmmxt_unpacklsb_M0();
1967 else
1968 gen_op_iwmmxt_unpacklub_M0();
1969 break;
1970 case 1:
1971 if (insn & (1 << 21))
1972 gen_op_iwmmxt_unpacklsw_M0();
1973 else
1974 gen_op_iwmmxt_unpackluw_M0();
1975 break;
1976 case 2:
1977 if (insn & (1 << 21))
1978 gen_op_iwmmxt_unpacklsl_M0();
1979 else
1980 gen_op_iwmmxt_unpacklul_M0();
1981 break;
1982 case 3:
1983 return 1;
1984 }
1985 gen_op_iwmmxt_movq_wRn_M0(wrd);
1986 gen_op_iwmmxt_set_mup();
1987 gen_op_iwmmxt_set_cup();
1988 break;
1989 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1990 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1991 wrd = (insn >> 12) & 0xf;
1992 rd0 = (insn >> 16) & 0xf;
1993 gen_op_iwmmxt_movq_M0_wRn(rd0);
1994 switch ((insn >> 22) & 3) {
1995 case 0:
1996 if (insn & (1 << 21))
1997 gen_op_iwmmxt_unpackhsb_M0();
1998 else
1999 gen_op_iwmmxt_unpackhub_M0();
2000 break;
2001 case 1:
2002 if (insn & (1 << 21))
2003 gen_op_iwmmxt_unpackhsw_M0();
2004 else
2005 gen_op_iwmmxt_unpackhuw_M0();
2006 break;
2007 case 2:
2008 if (insn & (1 << 21))
2009 gen_op_iwmmxt_unpackhsl_M0();
2010 else
2011 gen_op_iwmmxt_unpackhul_M0();
2012 break;
2013 case 3:
2014 return 1;
2015 }
2016 gen_op_iwmmxt_movq_wRn_M0(wrd);
2017 gen_op_iwmmxt_set_mup();
2018 gen_op_iwmmxt_set_cup();
2019 break;
2020 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2021 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2022 if (((insn >> 22) & 3) == 0)
2023 return 1;
18c9b560
AZ
2024 wrd = (insn >> 12) & 0xf;
2025 rd0 = (insn >> 16) & 0xf;
2026 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2027 tmp = tcg_temp_new_i32();
da6b5335 2028 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2029 tcg_temp_free_i32(tmp);
18c9b560 2030 return 1;
da6b5335 2031 }
18c9b560 2032 switch ((insn >> 22) & 3) {
18c9b560 2033 case 1:
477955bd 2034 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2035 break;
2036 case 2:
477955bd 2037 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2038 break;
2039 case 3:
477955bd 2040 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2041 break;
2042 }
7d1b0095 2043 tcg_temp_free_i32(tmp);
18c9b560
AZ
2044 gen_op_iwmmxt_movq_wRn_M0(wrd);
2045 gen_op_iwmmxt_set_mup();
2046 gen_op_iwmmxt_set_cup();
2047 break;
2048 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2049 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2050 if (((insn >> 22) & 3) == 0)
2051 return 1;
18c9b560
AZ
2052 wrd = (insn >> 12) & 0xf;
2053 rd0 = (insn >> 16) & 0xf;
2054 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2055 tmp = tcg_temp_new_i32();
da6b5335 2056 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2057 tcg_temp_free_i32(tmp);
18c9b560 2058 return 1;
da6b5335 2059 }
18c9b560 2060 switch ((insn >> 22) & 3) {
18c9b560 2061 case 1:
477955bd 2062 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2063 break;
2064 case 2:
477955bd 2065 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2066 break;
2067 case 3:
477955bd 2068 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2069 break;
2070 }
7d1b0095 2071 tcg_temp_free_i32(tmp);
18c9b560
AZ
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2077 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2078 if (((insn >> 22) & 3) == 0)
2079 return 1;
18c9b560
AZ
2080 wrd = (insn >> 12) & 0xf;
2081 rd0 = (insn >> 16) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2083 tmp = tcg_temp_new_i32();
da6b5335 2084 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2085 tcg_temp_free_i32(tmp);
18c9b560 2086 return 1;
da6b5335 2087 }
18c9b560 2088 switch ((insn >> 22) & 3) {
18c9b560 2089 case 1:
477955bd 2090 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2091 break;
2092 case 2:
477955bd 2093 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2094 break;
2095 case 3:
477955bd 2096 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2097 break;
2098 }
7d1b0095 2099 tcg_temp_free_i32(tmp);
18c9b560
AZ
2100 gen_op_iwmmxt_movq_wRn_M0(wrd);
2101 gen_op_iwmmxt_set_mup();
2102 gen_op_iwmmxt_set_cup();
2103 break;
2104 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2105 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2106 if (((insn >> 22) & 3) == 0)
2107 return 1;
18c9b560
AZ
2108 wrd = (insn >> 12) & 0xf;
2109 rd0 = (insn >> 16) & 0xf;
2110 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2111 tmp = tcg_temp_new_i32();
18c9b560 2112 switch ((insn >> 22) & 3) {
18c9b560 2113 case 1:
da6b5335 2114 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2115 tcg_temp_free_i32(tmp);
18c9b560 2116 return 1;
da6b5335 2117 }
477955bd 2118 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2119 break;
2120 case 2:
da6b5335 2121 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2122 tcg_temp_free_i32(tmp);
18c9b560 2123 return 1;
da6b5335 2124 }
477955bd 2125 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2126 break;
2127 case 3:
da6b5335 2128 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2129 tcg_temp_free_i32(tmp);
18c9b560 2130 return 1;
da6b5335 2131 }
477955bd 2132 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2133 break;
2134 }
7d1b0095 2135 tcg_temp_free_i32(tmp);
18c9b560
AZ
2136 gen_op_iwmmxt_movq_wRn_M0(wrd);
2137 gen_op_iwmmxt_set_mup();
2138 gen_op_iwmmxt_set_cup();
2139 break;
2140 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2141 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2142 wrd = (insn >> 12) & 0xf;
2143 rd0 = (insn >> 16) & 0xf;
2144 rd1 = (insn >> 0) & 0xf;
2145 gen_op_iwmmxt_movq_M0_wRn(rd0);
2146 switch ((insn >> 22) & 3) {
2147 case 0:
2148 if (insn & (1 << 21))
2149 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2150 else
2151 gen_op_iwmmxt_minub_M0_wRn(rd1);
2152 break;
2153 case 1:
2154 if (insn & (1 << 21))
2155 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2156 else
2157 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2158 break;
2159 case 2:
2160 if (insn & (1 << 21))
2161 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2162 else
2163 gen_op_iwmmxt_minul_M0_wRn(rd1);
2164 break;
2165 case 3:
2166 return 1;
2167 }
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 break;
2171 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2172 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 rd1 = (insn >> 0) & 0xf;
2176 gen_op_iwmmxt_movq_M0_wRn(rd0);
2177 switch ((insn >> 22) & 3) {
2178 case 0:
2179 if (insn & (1 << 21))
2180 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2181 else
2182 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2183 break;
2184 case 1:
2185 if (insn & (1 << 21))
2186 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2187 else
2188 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2189 break;
2190 case 2:
2191 if (insn & (1 << 21))
2192 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2193 else
2194 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2195 break;
2196 case 3:
2197 return 1;
2198 }
2199 gen_op_iwmmxt_movq_wRn_M0(wrd);
2200 gen_op_iwmmxt_set_mup();
2201 break;
2202 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2203 case 0x402: case 0x502: case 0x602: case 0x702:
2204 wrd = (insn >> 12) & 0xf;
2205 rd0 = (insn >> 16) & 0xf;
2206 rd1 = (insn >> 0) & 0xf;
2207 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2208 tmp = tcg_const_i32((insn >> 20) & 3);
2209 iwmmxt_load_reg(cpu_V1, rd1);
2210 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2211 tcg_temp_free_i32(tmp);
18c9b560
AZ
2212 gen_op_iwmmxt_movq_wRn_M0(wrd);
2213 gen_op_iwmmxt_set_mup();
2214 break;
2215 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2216 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2217 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2218 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2219 wrd = (insn >> 12) & 0xf;
2220 rd0 = (insn >> 16) & 0xf;
2221 rd1 = (insn >> 0) & 0xf;
2222 gen_op_iwmmxt_movq_M0_wRn(rd0);
2223 switch ((insn >> 20) & 0xf) {
2224 case 0x0:
2225 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2226 break;
2227 case 0x1:
2228 gen_op_iwmmxt_subub_M0_wRn(rd1);
2229 break;
2230 case 0x3:
2231 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2232 break;
2233 case 0x4:
2234 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2235 break;
2236 case 0x5:
2237 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2238 break;
2239 case 0x7:
2240 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2241 break;
2242 case 0x8:
2243 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2244 break;
2245 case 0x9:
2246 gen_op_iwmmxt_subul_M0_wRn(rd1);
2247 break;
2248 case 0xb:
2249 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2250 break;
2251 default:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 gen_op_iwmmxt_set_cup();
2257 break;
2258 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2259 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2260 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2261 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2262 wrd = (insn >> 12) & 0xf;
2263 rd0 = (insn >> 16) & 0xf;
2264 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2265 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2266 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2267 tcg_temp_free_i32(tmp);
18c9b560
AZ
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 gen_op_iwmmxt_set_cup();
2271 break;
2272 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2273 case 0x418: case 0x518: case 0x618: case 0x718:
2274 case 0x818: case 0x918: case 0xa18: case 0xb18:
2275 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 rd1 = (insn >> 0) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
2280 switch ((insn >> 20) & 0xf) {
2281 case 0x0:
2282 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2283 break;
2284 case 0x1:
2285 gen_op_iwmmxt_addub_M0_wRn(rd1);
2286 break;
2287 case 0x3:
2288 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2289 break;
2290 case 0x4:
2291 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2292 break;
2293 case 0x5:
2294 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2295 break;
2296 case 0x7:
2297 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2298 break;
2299 case 0x8:
2300 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2301 break;
2302 case 0x9:
2303 gen_op_iwmmxt_addul_M0_wRn(rd1);
2304 break;
2305 case 0xb:
2306 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2307 break;
2308 default:
2309 return 1;
2310 }
2311 gen_op_iwmmxt_movq_wRn_M0(wrd);
2312 gen_op_iwmmxt_set_mup();
2313 gen_op_iwmmxt_set_cup();
2314 break;
2315 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2316 case 0x408: case 0x508: case 0x608: case 0x708:
2317 case 0x808: case 0x908: case 0xa08: case 0xb08:
2318 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2319 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2320 return 1;
18c9b560
AZ
2321 wrd = (insn >> 12) & 0xf;
2322 rd0 = (insn >> 16) & 0xf;
2323 rd1 = (insn >> 0) & 0xf;
2324 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2325 switch ((insn >> 22) & 3) {
18c9b560
AZ
2326 case 1:
2327 if (insn & (1 << 21))
2328 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2329 else
2330 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2331 break;
2332 case 2:
2333 if (insn & (1 << 21))
2334 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2335 else
2336 gen_op_iwmmxt_packul_M0_wRn(rd1);
2337 break;
2338 case 3:
2339 if (insn & (1 << 21))
2340 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2341 else
2342 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2343 break;
2344 }
2345 gen_op_iwmmxt_movq_wRn_M0(wrd);
2346 gen_op_iwmmxt_set_mup();
2347 gen_op_iwmmxt_set_cup();
2348 break;
2349 case 0x201: case 0x203: case 0x205: case 0x207:
2350 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2351 case 0x211: case 0x213: case 0x215: case 0x217:
2352 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2353 wrd = (insn >> 5) & 0xf;
2354 rd0 = (insn >> 12) & 0xf;
2355 rd1 = (insn >> 0) & 0xf;
2356 if (rd0 == 0xf || rd1 == 0xf)
2357 return 1;
2358 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2359 tmp = load_reg(s, rd0);
2360 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2361 switch ((insn >> 16) & 0xf) {
2362 case 0x0: /* TMIA */
da6b5335 2363 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2364 break;
2365 case 0x8: /* TMIAPH */
da6b5335 2366 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2367 break;
2368 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2369 if (insn & (1 << 16))
da6b5335 2370 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2371 if (insn & (1 << 17))
da6b5335
FN
2372 tcg_gen_shri_i32(tmp2, tmp2, 16);
2373 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2374 break;
2375 default:
7d1b0095
PM
2376 tcg_temp_free_i32(tmp2);
2377 tcg_temp_free_i32(tmp);
18c9b560
AZ
2378 return 1;
2379 }
7d1b0095
PM
2380 tcg_temp_free_i32(tmp2);
2381 tcg_temp_free_i32(tmp);
18c9b560
AZ
2382 gen_op_iwmmxt_movq_wRn_M0(wrd);
2383 gen_op_iwmmxt_set_mup();
2384 break;
2385 default:
2386 return 1;
2387 }
2388
2389 return 0;
2390}
2391
a1c7273b 2392/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2393 (ie. an undefined instruction). */
0ecb72a5 2394static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2395{
2396 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2397 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2398
2399 if ((insn & 0x0ff00f10) == 0x0e200010) {
2400 /* Multiply with Internal Accumulate Format */
2401 rd0 = (insn >> 12) & 0xf;
2402 rd1 = insn & 0xf;
2403 acc = (insn >> 5) & 7;
2404
2405 if (acc != 0)
2406 return 1;
2407
3a554c0f
FN
2408 tmp = load_reg(s, rd0);
2409 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2410 switch ((insn >> 16) & 0xf) {
2411 case 0x0: /* MIA */
3a554c0f 2412 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2413 break;
2414 case 0x8: /* MIAPH */
3a554c0f 2415 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2416 break;
2417 case 0xc: /* MIABB */
2418 case 0xd: /* MIABT */
2419 case 0xe: /* MIATB */
2420 case 0xf: /* MIATT */
18c9b560 2421 if (insn & (1 << 16))
3a554c0f 2422 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2423 if (insn & (1 << 17))
3a554c0f
FN
2424 tcg_gen_shri_i32(tmp2, tmp2, 16);
2425 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2426 break;
2427 default:
2428 return 1;
2429 }
7d1b0095
PM
2430 tcg_temp_free_i32(tmp2);
2431 tcg_temp_free_i32(tmp);
18c9b560
AZ
2432
2433 gen_op_iwmmxt_movq_wRn_M0(acc);
2434 return 0;
2435 }
2436
2437 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2438 /* Internal Accumulator Access Format */
2439 rdhi = (insn >> 16) & 0xf;
2440 rdlo = (insn >> 12) & 0xf;
2441 acc = insn & 7;
2442
2443 if (acc != 0)
2444 return 1;
2445
2446 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2447 iwmmxt_load_reg(cpu_V0, acc);
2448 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2449 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2450 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2451 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2452 } else { /* MAR */
3a554c0f
FN
2453 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2454 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2455 }
2456 return 0;
2457 }
2458
2459 return 1;
2460}
2461
9ee6e8bb
PB
/* Shift right by n, where a negative n means shift left: used to pick the
   "big" register-number field out of the instruction word regardless of
   whether the field lies above or below the target bit position.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision VFP register number: the 4-bit field at
   'bigbit' supplies bits [4:1] and the single bit at 'smallbit' is bit 0.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision VFP register number into 'reg'.  With VFP3
   the extra bit at 'smallbit' becomes bit 4 (registers d0-d31); without
   VFP3 that bit must be zero (only d0-d15 exist) and the macro makes the
   enclosing function return 1 (UNDEF) if it is set.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
  if (arm_feature(env, ARM_FEATURE_VFP3)) { \
    reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
  } else { \
    if (insn & (1 << (smallbit))) \
      return 1; \
    reg = ((insn) >> (bigbit)) & 0x0f; \
  }} while (0)

/* Convenience wrappers for the D (destination), N and M operand fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2481
4373f3ce 2482/* Move between integer and VFP cores. */
39d5492a 2483static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2484{
39d5492a 2485 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2486 tcg_gen_mov_i32(tmp, cpu_F0s);
2487 return tmp;
2488}
2489
/* Copy the given 32-bit temp into the single-precision scratch register
   F0, then free the temp (this function takes ownership of 'tmp').  */
static void gen_vfp_msr(TCGv_i32 tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    tcg_temp_free_i32(tmp);
}
2495
39d5492a 2496static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2497{
39d5492a 2498 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2499 if (shift)
2500 tcg_gen_shri_i32(var, var, shift);
86831435 2501 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2502 tcg_gen_shli_i32(tmp, var, 8);
2503 tcg_gen_or_i32(var, var, tmp);
2504 tcg_gen_shli_i32(tmp, var, 16);
2505 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2506 tcg_temp_free_i32(tmp);
ad69471c
PB
2507}
2508
39d5492a 2509static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2510{
39d5492a 2511 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2512 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2513 tcg_gen_shli_i32(tmp, var, 16);
2514 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2515 tcg_temp_free_i32(tmp);
ad69471c
PB
2516}
2517
39d5492a 2518static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2519{
39d5492a 2520 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2521 tcg_gen_andi_i32(var, var, 0xffff0000);
2522 tcg_gen_shri_i32(tmp, var, 16);
2523 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2524 tcg_temp_free_i32(tmp);
ad69471c
PB
2525}
2526
/* Load a single Neon element from guest address 'addr' and replicate it
   across a 32-bit TCG reg.  'size' is the element size in the usual Neon
   log2 encoding: 0 = byte, 1 = halfword, 2 = word (no replication needed).
   Returns a fresh temp; caller frees.  */
static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
{
    /* Load a single Neon element and replicate into a 32 bit TCG reg */
    TCGv_i32 tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
        gen_neon_dup_u8(tmp, 0);
        break;
    case 1:
        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
        gen_neon_dup_low16(tmp);
        break;
    case 2:
        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
        break;
    default: /* Avoid compiler warnings.  */
        abort();
    }
    return tmp;
}
2548
a1c7273b 2549/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2550 (ie. an undefined instruction). */
0ecb72a5 2551static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2552{
2553 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2554 int dp, veclen;
39d5492a
PM
2555 TCGv_i32 addr;
2556 TCGv_i32 tmp;
2557 TCGv_i32 tmp2;
b7bcbe95 2558
40f137e1
PB
2559 if (!arm_feature(env, ARM_FEATURE_VFP))
2560 return 1;
2561
5df8bac1 2562 if (!s->vfp_enabled) {
9ee6e8bb 2563 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2564 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2565 return 1;
2566 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2567 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2568 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2569 return 1;
2570 }
b7bcbe95
FB
2571 dp = ((insn & 0xf00) == 0xb00);
2572 switch ((insn >> 24) & 0xf) {
2573 case 0xe:
2574 if (insn & (1 << 4)) {
2575 /* single register transfer */
b7bcbe95
FB
2576 rd = (insn >> 12) & 0xf;
2577 if (dp) {
9ee6e8bb
PB
2578 int size;
2579 int pass;
2580
2581 VFP_DREG_N(rn, insn);
2582 if (insn & 0xf)
b7bcbe95 2583 return 1;
9ee6e8bb
PB
2584 if (insn & 0x00c00060
2585 && !arm_feature(env, ARM_FEATURE_NEON))
2586 return 1;
2587
2588 pass = (insn >> 21) & 1;
2589 if (insn & (1 << 22)) {
2590 size = 0;
2591 offset = ((insn >> 5) & 3) * 8;
2592 } else if (insn & (1 << 5)) {
2593 size = 1;
2594 offset = (insn & (1 << 6)) ? 16 : 0;
2595 } else {
2596 size = 2;
2597 offset = 0;
2598 }
18c9b560 2599 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2600 /* vfp->arm */
ad69471c 2601 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2602 switch (size) {
2603 case 0:
9ee6e8bb 2604 if (offset)
ad69471c 2605 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2606 if (insn & (1 << 23))
ad69471c 2607 gen_uxtb(tmp);
9ee6e8bb 2608 else
ad69471c 2609 gen_sxtb(tmp);
9ee6e8bb
PB
2610 break;
2611 case 1:
9ee6e8bb
PB
2612 if (insn & (1 << 23)) {
2613 if (offset) {
ad69471c 2614 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2615 } else {
ad69471c 2616 gen_uxth(tmp);
9ee6e8bb
PB
2617 }
2618 } else {
2619 if (offset) {
ad69471c 2620 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2621 } else {
ad69471c 2622 gen_sxth(tmp);
9ee6e8bb
PB
2623 }
2624 }
2625 break;
2626 case 2:
9ee6e8bb
PB
2627 break;
2628 }
ad69471c 2629 store_reg(s, rd, tmp);
b7bcbe95
FB
2630 } else {
2631 /* arm->vfp */
ad69471c 2632 tmp = load_reg(s, rd);
9ee6e8bb
PB
2633 if (insn & (1 << 23)) {
2634 /* VDUP */
2635 if (size == 0) {
ad69471c 2636 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2637 } else if (size == 1) {
ad69471c 2638 gen_neon_dup_low16(tmp);
9ee6e8bb 2639 }
cbbccffc 2640 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2641 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2642 tcg_gen_mov_i32(tmp2, tmp);
2643 neon_store_reg(rn, n, tmp2);
2644 }
2645 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2646 } else {
2647 /* VMOV */
2648 switch (size) {
2649 case 0:
ad69471c 2650 tmp2 = neon_load_reg(rn, pass);
d593c48e 2651 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2652 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2653 break;
2654 case 1:
ad69471c 2655 tmp2 = neon_load_reg(rn, pass);
d593c48e 2656 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2657 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2658 break;
2659 case 2:
9ee6e8bb
PB
2660 break;
2661 }
ad69471c 2662 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2663 }
b7bcbe95 2664 }
9ee6e8bb
PB
2665 } else { /* !dp */
2666 if ((insn & 0x6f) != 0x00)
2667 return 1;
2668 rn = VFP_SREG_N(insn);
18c9b560 2669 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2670 /* vfp->arm */
2671 if (insn & (1 << 21)) {
2672 /* system register */
40f137e1 2673 rn >>= 1;
9ee6e8bb 2674
b7bcbe95 2675 switch (rn) {
40f137e1 2676 case ARM_VFP_FPSID:
4373f3ce 2677 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2678 VFP3 restricts all id registers to privileged
2679 accesses. */
2680 if (IS_USER(s)
2681 && arm_feature(env, ARM_FEATURE_VFP3))
2682 return 1;
4373f3ce 2683 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2684 break;
40f137e1 2685 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2686 if (IS_USER(s))
2687 return 1;
4373f3ce 2688 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2689 break;
40f137e1
PB
2690 case ARM_VFP_FPINST:
2691 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2692 /* Not present in VFP3. */
2693 if (IS_USER(s)
2694 || arm_feature(env, ARM_FEATURE_VFP3))
2695 return 1;
4373f3ce 2696 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2697 break;
40f137e1 2698 case ARM_VFP_FPSCR:
601d70b9 2699 if (rd == 15) {
4373f3ce
PB
2700 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2701 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2702 } else {
7d1b0095 2703 tmp = tcg_temp_new_i32();
4373f3ce
PB
2704 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2705 }
b7bcbe95 2706 break;
9ee6e8bb
PB
2707 case ARM_VFP_MVFR0:
2708 case ARM_VFP_MVFR1:
2709 if (IS_USER(s)
06ed5d66 2710 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2711 return 1;
4373f3ce 2712 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2713 break;
b7bcbe95
FB
2714 default:
2715 return 1;
2716 }
2717 } else {
2718 gen_mov_F0_vreg(0, rn);
4373f3ce 2719 tmp = gen_vfp_mrs();
b7bcbe95
FB
2720 }
2721 if (rd == 15) {
b5ff1b31 2722 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2723 gen_set_nzcv(tmp);
7d1b0095 2724 tcg_temp_free_i32(tmp);
4373f3ce
PB
2725 } else {
2726 store_reg(s, rd, tmp);
2727 }
b7bcbe95
FB
2728 } else {
2729 /* arm->vfp */
b7bcbe95 2730 if (insn & (1 << 21)) {
40f137e1 2731 rn >>= 1;
b7bcbe95
FB
2732 /* system register */
2733 switch (rn) {
40f137e1 2734 case ARM_VFP_FPSID:
9ee6e8bb
PB
2735 case ARM_VFP_MVFR0:
2736 case ARM_VFP_MVFR1:
b7bcbe95
FB
2737 /* Writes are ignored. */
2738 break;
40f137e1 2739 case ARM_VFP_FPSCR:
e4c1cfa5 2740 tmp = load_reg(s, rd);
4373f3ce 2741 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2742 tcg_temp_free_i32(tmp);
b5ff1b31 2743 gen_lookup_tb(s);
b7bcbe95 2744 break;
40f137e1 2745 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2746 if (IS_USER(s))
2747 return 1;
71b3c3de
JR
2748 /* TODO: VFP subarchitecture support.
2749 * For now, keep the EN bit only */
e4c1cfa5 2750 tmp = load_reg(s, rd);
71b3c3de 2751 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2752 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2753 gen_lookup_tb(s);
2754 break;
2755 case ARM_VFP_FPINST:
2756 case ARM_VFP_FPINST2:
e4c1cfa5 2757 tmp = load_reg(s, rd);
4373f3ce 2758 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2759 break;
b7bcbe95
FB
2760 default:
2761 return 1;
2762 }
2763 } else {
e4c1cfa5 2764 tmp = load_reg(s, rd);
4373f3ce 2765 gen_vfp_msr(tmp);
b7bcbe95
FB
2766 gen_mov_vreg_F0(0, rn);
2767 }
2768 }
2769 }
2770 } else {
2771 /* data processing */
2772 /* The opcode is in bits 23, 21, 20 and 6. */
2773 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2774 if (dp) {
2775 if (op == 15) {
2776 /* rn is opcode */
2777 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2778 } else {
2779 /* rn is register number */
9ee6e8bb 2780 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2781 }
2782
04595bf6 2783 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2784 /* Integer or single precision destination. */
9ee6e8bb 2785 rd = VFP_SREG_D(insn);
b7bcbe95 2786 } else {
9ee6e8bb 2787 VFP_DREG_D(rd, insn);
b7bcbe95 2788 }
04595bf6
PM
2789 if (op == 15 &&
2790 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2791 /* VCVT from int is always from S reg regardless of dp bit.
2792 * VCVT with immediate frac_bits has same format as SREG_M
2793 */
2794 rm = VFP_SREG_M(insn);
b7bcbe95 2795 } else {
9ee6e8bb 2796 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2797 }
2798 } else {
9ee6e8bb 2799 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2800 if (op == 15 && rn == 15) {
2801 /* Double precision destination. */
9ee6e8bb
PB
2802 VFP_DREG_D(rd, insn);
2803 } else {
2804 rd = VFP_SREG_D(insn);
2805 }
04595bf6
PM
2806 /* NB that we implicitly rely on the encoding for the frac_bits
2807 * in VCVT of fixed to float being the same as that of an SREG_M
2808 */
9ee6e8bb 2809 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2810 }
2811
69d1fc22 2812 veclen = s->vec_len;
b7bcbe95
FB
2813 if (op == 15 && rn > 3)
2814 veclen = 0;
2815
2816 /* Shut up compiler warnings. */
2817 delta_m = 0;
2818 delta_d = 0;
2819 bank_mask = 0;
3b46e624 2820
b7bcbe95
FB
2821 if (veclen > 0) {
2822 if (dp)
2823 bank_mask = 0xc;
2824 else
2825 bank_mask = 0x18;
2826
2827 /* Figure out what type of vector operation this is. */
2828 if ((rd & bank_mask) == 0) {
2829 /* scalar */
2830 veclen = 0;
2831 } else {
2832 if (dp)
69d1fc22 2833 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2834 else
69d1fc22 2835 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2836
2837 if ((rm & bank_mask) == 0) {
2838 /* mixed scalar/vector */
2839 delta_m = 0;
2840 } else {
2841 /* vector */
2842 delta_m = delta_d;
2843 }
2844 }
2845 }
2846
2847 /* Load the initial operands. */
2848 if (op == 15) {
2849 switch (rn) {
2850 case 16:
2851 case 17:
2852 /* Integer source */
2853 gen_mov_F0_vreg(0, rm);
2854 break;
2855 case 8:
2856 case 9:
2857 /* Compare */
2858 gen_mov_F0_vreg(dp, rd);
2859 gen_mov_F1_vreg(dp, rm);
2860 break;
2861 case 10:
2862 case 11:
2863 /* Compare with zero */
2864 gen_mov_F0_vreg(dp, rd);
2865 gen_vfp_F1_ld0(dp);
2866 break;
9ee6e8bb
PB
2867 case 20:
2868 case 21:
2869 case 22:
2870 case 23:
644ad806
PB
2871 case 28:
2872 case 29:
2873 case 30:
2874 case 31:
9ee6e8bb
PB
2875 /* Source and destination the same. */
2876 gen_mov_F0_vreg(dp, rd);
2877 break;
6e0c0ed1
PM
2878 case 4:
2879 case 5:
2880 case 6:
2881 case 7:
2882 /* VCVTB, VCVTT: only present with the halfprec extension,
2883 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2884 */
2885 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2886 return 1;
2887 }
2888 /* Otherwise fall through */
b7bcbe95
FB
2889 default:
2890 /* One source operand. */
2891 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2892 break;
b7bcbe95
FB
2893 }
2894 } else {
2895 /* Two source operands. */
2896 gen_mov_F0_vreg(dp, rn);
2897 gen_mov_F1_vreg(dp, rm);
2898 }
2899
2900 for (;;) {
2901 /* Perform the calculation. */
2902 switch (op) {
605a6aed
PM
2903 case 0: /* VMLA: fd + (fn * fm) */
2904 /* Note that order of inputs to the add matters for NaNs */
2905 gen_vfp_F1_mul(dp);
2906 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2907 gen_vfp_add(dp);
2908 break;
605a6aed 2909 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2910 gen_vfp_mul(dp);
605a6aed
PM
2911 gen_vfp_F1_neg(dp);
2912 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2913 gen_vfp_add(dp);
2914 break;
605a6aed
PM
2915 case 2: /* VNMLS: -fd + (fn * fm) */
2916 /* Note that it isn't valid to replace (-A + B) with (B - A)
2917 * or similar plausible looking simplifications
2918 * because this will give wrong results for NaNs.
2919 */
2920 gen_vfp_F1_mul(dp);
2921 gen_mov_F0_vreg(dp, rd);
2922 gen_vfp_neg(dp);
2923 gen_vfp_add(dp);
b7bcbe95 2924 break;
605a6aed 2925 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2926 gen_vfp_mul(dp);
605a6aed
PM
2927 gen_vfp_F1_neg(dp);
2928 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2929 gen_vfp_neg(dp);
605a6aed 2930 gen_vfp_add(dp);
b7bcbe95
FB
2931 break;
2932 case 4: /* mul: fn * fm */
2933 gen_vfp_mul(dp);
2934 break;
2935 case 5: /* nmul: -(fn * fm) */
2936 gen_vfp_mul(dp);
2937 gen_vfp_neg(dp);
2938 break;
2939 case 6: /* add: fn + fm */
2940 gen_vfp_add(dp);
2941 break;
2942 case 7: /* sub: fn - fm */
2943 gen_vfp_sub(dp);
2944 break;
2945 case 8: /* div: fn / fm */
2946 gen_vfp_div(dp);
2947 break;
da97f52c
PM
2948 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2949 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2950 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2951 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2952 /* These are fused multiply-add, and must be done as one
2953 * floating point operation with no rounding between the
2954 * multiplication and addition steps.
2955 * NB that doing the negations here as separate steps is
2956 * correct : an input NaN should come out with its sign bit
2957 * flipped if it is a negated-input.
2958 */
2959 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
2960 return 1;
2961 }
2962 if (dp) {
2963 TCGv_ptr fpst;
2964 TCGv_i64 frd;
2965 if (op & 1) {
2966 /* VFNMS, VFMS */
2967 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
2968 }
2969 frd = tcg_temp_new_i64();
2970 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
2971 if (op & 2) {
2972 /* VFNMA, VFNMS */
2973 gen_helper_vfp_negd(frd, frd);
2974 }
2975 fpst = get_fpstatus_ptr(0);
2976 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
2977 cpu_F1d, frd, fpst);
2978 tcg_temp_free_ptr(fpst);
2979 tcg_temp_free_i64(frd);
2980 } else {
2981 TCGv_ptr fpst;
2982 TCGv_i32 frd;
2983 if (op & 1) {
2984 /* VFNMS, VFMS */
2985 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
2986 }
2987 frd = tcg_temp_new_i32();
2988 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
2989 if (op & 2) {
2990 gen_helper_vfp_negs(frd, frd);
2991 }
2992 fpst = get_fpstatus_ptr(0);
2993 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
2994 cpu_F1s, frd, fpst);
2995 tcg_temp_free_ptr(fpst);
2996 tcg_temp_free_i32(frd);
2997 }
2998 break;
9ee6e8bb
PB
2999 case 14: /* fconst */
3000 if (!arm_feature(env, ARM_FEATURE_VFP3))
3001 return 1;
3002
3003 n = (insn << 12) & 0x80000000;
3004 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3005 if (dp) {
3006 if (i & 0x40)
3007 i |= 0x3f80;
3008 else
3009 i |= 0x4000;
3010 n |= i << 16;
4373f3ce 3011 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3012 } else {
3013 if (i & 0x40)
3014 i |= 0x780;
3015 else
3016 i |= 0x800;
3017 n |= i << 19;
5b340b51 3018 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3019 }
9ee6e8bb 3020 break;
b7bcbe95
FB
3021 case 15: /* extension space */
3022 switch (rn) {
3023 case 0: /* cpy */
3024 /* no-op */
3025 break;
3026 case 1: /* abs */
3027 gen_vfp_abs(dp);
3028 break;
3029 case 2: /* neg */
3030 gen_vfp_neg(dp);
3031 break;
3032 case 3: /* sqrt */
3033 gen_vfp_sqrt(dp);
3034 break;
60011498 3035 case 4: /* vcvtb.f32.f16 */
60011498
PB
3036 tmp = gen_vfp_mrs();
3037 tcg_gen_ext16u_i32(tmp, tmp);
3038 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3039 tcg_temp_free_i32(tmp);
60011498
PB
3040 break;
3041 case 5: /* vcvtt.f32.f16 */
60011498
PB
3042 tmp = gen_vfp_mrs();
3043 tcg_gen_shri_i32(tmp, tmp, 16);
3044 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3045 tcg_temp_free_i32(tmp);
60011498
PB
3046 break;
3047 case 6: /* vcvtb.f16.f32 */
7d1b0095 3048 tmp = tcg_temp_new_i32();
60011498
PB
3049 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3050 gen_mov_F0_vreg(0, rd);
3051 tmp2 = gen_vfp_mrs();
3052 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3053 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3054 tcg_temp_free_i32(tmp2);
60011498
PB
3055 gen_vfp_msr(tmp);
3056 break;
3057 case 7: /* vcvtt.f16.f32 */
7d1b0095 3058 tmp = tcg_temp_new_i32();
60011498
PB
3059 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3060 tcg_gen_shli_i32(tmp, tmp, 16);
3061 gen_mov_F0_vreg(0, rd);
3062 tmp2 = gen_vfp_mrs();
3063 tcg_gen_ext16u_i32(tmp2, tmp2);
3064 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3065 tcg_temp_free_i32(tmp2);
60011498
PB
3066 gen_vfp_msr(tmp);
3067 break;
b7bcbe95
FB
3068 case 8: /* cmp */
3069 gen_vfp_cmp(dp);
3070 break;
3071 case 9: /* cmpe */
3072 gen_vfp_cmpe(dp);
3073 break;
3074 case 10: /* cmpz */
3075 gen_vfp_cmp(dp);
3076 break;
3077 case 11: /* cmpez */
3078 gen_vfp_F1_ld0(dp);
3079 gen_vfp_cmpe(dp);
3080 break;
3081 case 15: /* single<->double conversion */
3082 if (dp)
4373f3ce 3083 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3084 else
4373f3ce 3085 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3086 break;
3087 case 16: /* fuito */
5500b06c 3088 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3089 break;
3090 case 17: /* fsito */
5500b06c 3091 gen_vfp_sito(dp, 0);
b7bcbe95 3092 break;
9ee6e8bb
PB
3093 case 20: /* fshto */
3094 if (!arm_feature(env, ARM_FEATURE_VFP3))
3095 return 1;
5500b06c 3096 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3097 break;
3098 case 21: /* fslto */
3099 if (!arm_feature(env, ARM_FEATURE_VFP3))
3100 return 1;
5500b06c 3101 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3102 break;
3103 case 22: /* fuhto */
3104 if (!arm_feature(env, ARM_FEATURE_VFP3))
3105 return 1;
5500b06c 3106 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3107 break;
3108 case 23: /* fulto */
3109 if (!arm_feature(env, ARM_FEATURE_VFP3))
3110 return 1;
5500b06c 3111 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3112 break;
b7bcbe95 3113 case 24: /* ftoui */
5500b06c 3114 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3115 break;
3116 case 25: /* ftouiz */
5500b06c 3117 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3118 break;
3119 case 26: /* ftosi */
5500b06c 3120 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3121 break;
3122 case 27: /* ftosiz */
5500b06c 3123 gen_vfp_tosiz(dp, 0);
b7bcbe95 3124 break;
9ee6e8bb
PB
3125 case 28: /* ftosh */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
5500b06c 3128 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3129 break;
3130 case 29: /* ftosl */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
5500b06c 3133 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3134 break;
3135 case 30: /* ftouh */
3136 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 return 1;
5500b06c 3138 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3139 break;
3140 case 31: /* ftoul */
3141 if (!arm_feature(env, ARM_FEATURE_VFP3))
3142 return 1;
5500b06c 3143 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3144 break;
b7bcbe95 3145 default: /* undefined */
b7bcbe95
FB
3146 return 1;
3147 }
3148 break;
3149 default: /* undefined */
b7bcbe95
FB
3150 return 1;
3151 }
3152
3153 /* Write back the result. */
3154 if (op == 15 && (rn >= 8 && rn <= 11))
3155 ; /* Comparison, do nothing. */
04595bf6
PM
3156 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3157 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3158 gen_mov_vreg_F0(0, rd);
3159 else if (op == 15 && rn == 15)
3160 /* conversion */
3161 gen_mov_vreg_F0(!dp, rd);
3162 else
3163 gen_mov_vreg_F0(dp, rd);
3164
3165 /* break out of the loop if we have finished */
3166 if (veclen == 0)
3167 break;
3168
3169 if (op == 15 && delta_m == 0) {
3170 /* single source one-many */
3171 while (veclen--) {
3172 rd = ((rd + delta_d) & (bank_mask - 1))
3173 | (rd & bank_mask);
3174 gen_mov_vreg_F0(dp, rd);
3175 }
3176 break;
3177 }
3178 /* Setup the next operands. */
3179 veclen--;
3180 rd = ((rd + delta_d) & (bank_mask - 1))
3181 | (rd & bank_mask);
3182
3183 if (op == 15) {
3184 /* One source operand. */
3185 rm = ((rm + delta_m) & (bank_mask - 1))
3186 | (rm & bank_mask);
3187 gen_mov_F0_vreg(dp, rm);
3188 } else {
3189 /* Two source operands. */
3190 rn = ((rn + delta_d) & (bank_mask - 1))
3191 | (rn & bank_mask);
3192 gen_mov_F0_vreg(dp, rn);
3193 if (delta_m) {
3194 rm = ((rm + delta_m) & (bank_mask - 1))
3195 | (rm & bank_mask);
3196 gen_mov_F1_vreg(dp, rm);
3197 }
3198 }
3199 }
3200 }
3201 break;
3202 case 0xc:
3203 case 0xd:
8387da81 3204 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3205 /* two-register transfer */
3206 rn = (insn >> 16) & 0xf;
3207 rd = (insn >> 12) & 0xf;
3208 if (dp) {
9ee6e8bb
PB
3209 VFP_DREG_M(rm, insn);
3210 } else {
3211 rm = VFP_SREG_M(insn);
3212 }
b7bcbe95 3213
18c9b560 3214 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3215 /* vfp->arm */
3216 if (dp) {
4373f3ce
PB
3217 gen_mov_F0_vreg(0, rm * 2);
3218 tmp = gen_vfp_mrs();
3219 store_reg(s, rd, tmp);
3220 gen_mov_F0_vreg(0, rm * 2 + 1);
3221 tmp = gen_vfp_mrs();
3222 store_reg(s, rn, tmp);
b7bcbe95
FB
3223 } else {
3224 gen_mov_F0_vreg(0, rm);
4373f3ce 3225 tmp = gen_vfp_mrs();
8387da81 3226 store_reg(s, rd, tmp);
b7bcbe95 3227 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3228 tmp = gen_vfp_mrs();
8387da81 3229 store_reg(s, rn, tmp);
b7bcbe95
FB
3230 }
3231 } else {
3232 /* arm->vfp */
3233 if (dp) {
4373f3ce
PB
3234 tmp = load_reg(s, rd);
3235 gen_vfp_msr(tmp);
3236 gen_mov_vreg_F0(0, rm * 2);
3237 tmp = load_reg(s, rn);
3238 gen_vfp_msr(tmp);
3239 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3240 } else {
8387da81 3241 tmp = load_reg(s, rd);
4373f3ce 3242 gen_vfp_msr(tmp);
b7bcbe95 3243 gen_mov_vreg_F0(0, rm);
8387da81 3244 tmp = load_reg(s, rn);
4373f3ce 3245 gen_vfp_msr(tmp);
b7bcbe95
FB
3246 gen_mov_vreg_F0(0, rm + 1);
3247 }
3248 }
3249 } else {
3250 /* Load/store */
3251 rn = (insn >> 16) & 0xf;
3252 if (dp)
9ee6e8bb 3253 VFP_DREG_D(rd, insn);
b7bcbe95 3254 else
9ee6e8bb 3255 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3256 if ((insn & 0x01200000) == 0x01000000) {
3257 /* Single load/store */
3258 offset = (insn & 0xff) << 2;
3259 if ((insn & (1 << 23)) == 0)
3260 offset = -offset;
934814f1
PM
3261 if (s->thumb && rn == 15) {
3262 /* This is actually UNPREDICTABLE */
3263 addr = tcg_temp_new_i32();
3264 tcg_gen_movi_i32(addr, s->pc & ~2);
3265 } else {
3266 addr = load_reg(s, rn);
3267 }
312eea9f 3268 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3269 if (insn & (1 << 20)) {
312eea9f 3270 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3271 gen_mov_vreg_F0(dp, rd);
3272 } else {
3273 gen_mov_F0_vreg(dp, rd);
312eea9f 3274 gen_vfp_st(s, dp, addr);
b7bcbe95 3275 }
7d1b0095 3276 tcg_temp_free_i32(addr);
b7bcbe95
FB
3277 } else {
3278 /* load/store multiple */
934814f1 3279 int w = insn & (1 << 21);
b7bcbe95
FB
3280 if (dp)
3281 n = (insn >> 1) & 0x7f;
3282 else
3283 n = insn & 0xff;
3284
934814f1
PM
3285 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3286 /* P == U , W == 1 => UNDEF */
3287 return 1;
3288 }
3289 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3290 /* UNPREDICTABLE cases for bad immediates: we choose to
3291 * UNDEF to avoid generating huge numbers of TCG ops
3292 */
3293 return 1;
3294 }
3295 if (rn == 15 && w) {
3296 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3297 return 1;
3298 }
3299
3300 if (s->thumb && rn == 15) {
3301 /* This is actually UNPREDICTABLE */
3302 addr = tcg_temp_new_i32();
3303 tcg_gen_movi_i32(addr, s->pc & ~2);
3304 } else {
3305 addr = load_reg(s, rn);
3306 }
b7bcbe95 3307 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3308 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3309
3310 if (dp)
3311 offset = 8;
3312 else
3313 offset = 4;
3314 for (i = 0; i < n; i++) {
18c9b560 3315 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3316 /* load */
312eea9f 3317 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3318 gen_mov_vreg_F0(dp, rd + i);
3319 } else {
3320 /* store */
3321 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3322 gen_vfp_st(s, dp, addr);
b7bcbe95 3323 }
312eea9f 3324 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3325 }
934814f1 3326 if (w) {
b7bcbe95
FB
3327 /* writeback */
3328 if (insn & (1 << 24))
3329 offset = -offset * n;
3330 else if (dp && (insn & 1))
3331 offset = 4;
3332 else
3333 offset = 0;
3334
3335 if (offset != 0)
312eea9f
FN
3336 tcg_gen_addi_i32(addr, addr, offset);
3337 store_reg(s, rn, addr);
3338 } else {
7d1b0095 3339 tcg_temp_free_i32(addr);
b7bcbe95
FB
3340 }
3341 }
3342 }
3343 break;
3344 default:
3345 /* Should never happen. */
3346 return 1;
3347 }
3348 return 0;
3349}
3350
6e256c93 3351static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3352{
6e256c93
FB
3353 TranslationBlock *tb;
3354
3355 tb = s->tb;
3356 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3357 tcg_gen_goto_tb(n);
8984bd2e 3358 gen_set_pc_im(dest);
4b4a72e5 3359 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3360 } else {
8984bd2e 3361 gen_set_pc_im(dest);
57fec1fe 3362 tcg_gen_exit_tb(0);
6e256c93 3363 }
c53be334
FB
3364}
3365
8aaca4c0
FB
3366static inline void gen_jmp (DisasContext *s, uint32_t dest)
3367{
551bd27f 3368 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3369 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3370 if (s->thumb)
d9ba4830
PB
3371 dest |= 1;
3372 gen_bx_im(s, dest);
8aaca4c0 3373 } else {
6e256c93 3374 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3375 s->is_jmp = DISAS_TB_JUMP;
3376 }
3377}
3378
39d5492a 3379static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3380{
ee097184 3381 if (x)
d9ba4830 3382 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3383 else
d9ba4830 3384 gen_sxth(t0);
ee097184 3385 if (y)
d9ba4830 3386 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3387 else
d9ba4830
PB
3388 gen_sxth(t1);
3389 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3390}
3391
3392/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3393static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3394 uint32_t mask;
3395
3396 mask = 0;
3397 if (flags & (1 << 0))
3398 mask |= 0xff;
3399 if (flags & (1 << 1))
3400 mask |= 0xff00;
3401 if (flags & (1 << 2))
3402 mask |= 0xff0000;
3403 if (flags & (1 << 3))
3404 mask |= 0xff000000;
9ee6e8bb 3405
2ae23e75 3406 /* Mask out undefined bits. */
9ee6e8bb 3407 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3408 if (!arm_feature(env, ARM_FEATURE_V4T))
3409 mask &= ~CPSR_T;
3410 if (!arm_feature(env, ARM_FEATURE_V5))
3411 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3412 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3413 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3414 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3415 mask &= ~CPSR_IT;
9ee6e8bb 3416 /* Mask out execution state bits. */
2ae23e75 3417 if (!spsr)
e160c51c 3418 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3419 /* Mask out privileged bits. */
3420 if (IS_USER(s))
9ee6e8bb 3421 mask &= CPSR_USER;
b5ff1b31
FB
3422 return mask;
3423}
3424
2fbac54b 3425/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3426static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3427{
39d5492a 3428 TCGv_i32 tmp;
b5ff1b31
FB
3429 if (spsr) {
3430 /* ??? This is also undefined in system mode. */
3431 if (IS_USER(s))
3432 return 1;
d9ba4830
PB
3433
3434 tmp = load_cpu_field(spsr);
3435 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3436 tcg_gen_andi_i32(t0, t0, mask);
3437 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3438 store_cpu_field(tmp, spsr);
b5ff1b31 3439 } else {
2fbac54b 3440 gen_set_cpsr(t0, mask);
b5ff1b31 3441 }
7d1b0095 3442 tcg_temp_free_i32(t0);
b5ff1b31
FB
3443 gen_lookup_tb(s);
3444 return 0;
3445}
3446
2fbac54b
FN
3447/* Returns nonzero if access to the PSR is not permitted. */
3448static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3449{
39d5492a 3450 TCGv_i32 tmp;
7d1b0095 3451 tmp = tcg_temp_new_i32();
2fbac54b
FN
3452 tcg_gen_movi_i32(tmp, val);
3453 return gen_set_psr(s, mask, spsr, tmp);
3454}
3455
e9bb4aa9 3456/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3457static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3458{
39d5492a 3459 TCGv_i32 tmp;
e9bb4aa9 3460 store_reg(s, 15, pc);
d9ba4830
PB
3461 tmp = load_cpu_field(spsr);
3462 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3463 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3464 s->is_jmp = DISAS_UPDATE;
3465}
3466
b0109805 3467/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3468static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3469{
b0109805 3470 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3471 tcg_temp_free_i32(cpsr);
b0109805 3472 store_reg(s, 15, pc);
9ee6e8bb
PB
3473 s->is_jmp = DISAS_UPDATE;
3474}
3b46e624 3475
9ee6e8bb
PB
/* Sync the Thumb IT-block state (condition and mask) back into the
 * condexec_bits CPU state field, so it is up to date if an exception is
 * taken from the middle of the block.  Note the mask is re-packed with a
 * right shift by one.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 3486
bc4a0de0
PM
/* Raise exception 'excp' for the instruction at (s->pc - offset): sync
 * the IT-block state and PC into the CPU state, emit the exception and
 * end the TB.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3494
9ee6e8bb
PB
3495static void gen_nop_hint(DisasContext *s, int val)
3496{
3497 switch (val) {
3498 case 3: /* wfi */
8984bd2e 3499 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3500 s->is_jmp = DISAS_WFI;
3501 break;
3502 case 2: /* wfe */
3503 case 4: /* sev */
12b10571
MR
3504 case 5: /* sevl */
3505 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3506 default: /* nop */
3507 break;
3508 }
3509}
99c475ab 3510
ad69471c 3511#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3512
39d5492a 3513static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3514{
3515 switch (size) {
dd8fbd78
FN
3516 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3517 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3518 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3519 default: abort();
9ee6e8bb 3520 }
9ee6e8bb
PB
3521}
3522
39d5492a 3523static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3524{
3525 switch (size) {
dd8fbd78
FN
3526 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3527 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3528 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3529 default: return;
3530 }
3531}
3532
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3538
ad69471c
PB
/* Invoke the signed/unsigned 8/16/32-bit variant of helper 'name'
 * (helpers that also take cpu_env) based on the enclosing function's
 * local variables 'size' and 'u'.  Operands are the locals tmp (in/out)
 * and tmp2.  Expands to "return 1" (UNDEF) for invalid combinations.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3561
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env:
 * dispatch on the locals 'size' and 'u' to the s8/u8/s16/u16/s32/u32
 * variant of helper 'name', operating on tmp (in/out) and tmp2.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3584
39d5492a 3585static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3586{
39d5492a 3587 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3588 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3589 return tmp;
9ee6e8bb
PB
3590}
3591
39d5492a 3592static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3593{
dd8fbd78 3594 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3595 tcg_temp_free_i32(var);
9ee6e8bb
PB
3596}
3597
39d5492a 3598static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3599{
39d5492a 3600 TCGv_i32 tmp;
9ee6e8bb 3601 if (size == 1) {
0fad6efc
PM
3602 tmp = neon_load_reg(reg & 7, reg >> 4);
3603 if (reg & 8) {
dd8fbd78 3604 gen_neon_dup_high16(tmp);
0fad6efc
PM
3605 } else {
3606 gen_neon_dup_low16(tmp);
dd8fbd78 3607 }
0fad6efc
PM
3608 } else {
3609 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3610 }
dd8fbd78 3611 return tmp;
9ee6e8bb
PB
3612}
3613
02acedf9 3614static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3615{
39d5492a 3616 TCGv_i32 tmp, tmp2;
600b828c 3617 if (!q && size == 2) {
02acedf9
PM
3618 return 1;
3619 }
3620 tmp = tcg_const_i32(rd);
3621 tmp2 = tcg_const_i32(rm);
3622 if (q) {
3623 switch (size) {
3624 case 0:
02da0b2d 3625 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3626 break;
3627 case 1:
02da0b2d 3628 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3629 break;
3630 case 2:
02da0b2d 3631 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3632 break;
3633 default:
3634 abort();
3635 }
3636 } else {
3637 switch (size) {
3638 case 0:
02da0b2d 3639 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3640 break;
3641 case 1:
02da0b2d 3642 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3643 break;
3644 default:
3645 abort();
3646 }
3647 }
3648 tcg_temp_free_i32(tmp);
3649 tcg_temp_free_i32(tmp2);
3650 return 0;
19457615
FN
3651}
3652
d68a6f3a 3653static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3654{
39d5492a 3655 TCGv_i32 tmp, tmp2;
600b828c 3656 if (!q && size == 2) {
d68a6f3a
PM
3657 return 1;
3658 }
3659 tmp = tcg_const_i32(rd);
3660 tmp2 = tcg_const_i32(rm);
3661 if (q) {
3662 switch (size) {
3663 case 0:
02da0b2d 3664 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3665 break;
3666 case 1:
02da0b2d 3667 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3668 break;
3669 case 2:
02da0b2d 3670 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3671 break;
3672 default:
3673 abort();
3674 }
3675 } else {
3676 switch (size) {
3677 case 0:
02da0b2d 3678 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3679 break;
3680 case 1:
02da0b2d 3681 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3682 break;
3683 default:
3684 abort();
3685 }
3686 }
3687 tcg_temp_free_i32(tmp);
3688 tcg_temp_free_i32(tmp2);
3689 return 0;
19457615
FN
3690}
3691
39d5492a 3692static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 3693{
39d5492a 3694 TCGv_i32 rd, tmp;
19457615 3695
7d1b0095
PM
3696 rd = tcg_temp_new_i32();
3697 tmp = tcg_temp_new_i32();
19457615
FN
3698
3699 tcg_gen_shli_i32(rd, t0, 8);
3700 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3701 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3702 tcg_gen_or_i32(rd, rd, tmp);
3703
3704 tcg_gen_shri_i32(t1, t1, 8);
3705 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3706 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3707 tcg_gen_or_i32(t1, t1, tmp);
3708 tcg_gen_mov_i32(t0, rd);
3709
7d1b0095
PM
3710 tcg_temp_free_i32(tmp);
3711 tcg_temp_free_i32(rd);
19457615
FN
3712}
3713
39d5492a 3714static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 3715{
39d5492a 3716 TCGv_i32 rd, tmp;
19457615 3717
7d1b0095
PM
3718 rd = tcg_temp_new_i32();
3719 tmp = tcg_temp_new_i32();
19457615
FN
3720
3721 tcg_gen_shli_i32(rd, t0, 16);
3722 tcg_gen_andi_i32(tmp, t1, 0xffff);
3723 tcg_gen_or_i32(rd, rd, tmp);
3724 tcg_gen_shri_i32(t1, t1, 16);
3725 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3726 tcg_gen_or_i32(t1, t1, tmp);
3727 tcg_gen_mov_i32(t0, rd);
3728
7d1b0095
PM
3729 tcg_temp_free_i32(tmp);
3730 tcg_temp_free_i32(rd);
19457615
FN
3731}
3732
3733
9ee6e8bb
PB
/* Per-opcode layout for "VLDn/VSTn (multiple structures)": number of
 * registers transferred, element interleave factor, and register
 * spacing, indexed by the instruction's 4-bit op field (values 0..10).
 * The table is only ever read, so declare it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3751
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved transfers, restart from the base address
             * with a per-register offset rather than stepping linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one whole D register per access.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    tcg_gen_qemu_ld64(tmp64, addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    tcg_gen_qemu_st64(tmp64, addr, IS_USER(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: assemble/disassemble each 32-bit
                 * half (pass) of the D register from memory accesses.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses packed into one word.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_qemu_ld16u(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_qemu_st16(tmp2, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses packed into one word.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the target lane,
                         * preserving the rest of the register.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise add register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4070
8f8e3aa4 4071/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4072static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4073{
4074 tcg_gen_and_i32(t, t, c);
f669df27 4075 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4076 tcg_gen_or_i32(dest, t, f);
4077}
4078
39d5492a 4079static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4080{
4081 switch (size) {
4082 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4083 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4084 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4085 default: abort();
4086 }
4087}
4088
39d5492a 4089static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4090{
4091 switch (size) {
02da0b2d
PM
4092 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4093 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4094 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4095 default: abort();
4096 }
4097}
4098
39d5492a 4099static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4100{
4101 switch (size) {
02da0b2d
PM
4102 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4103 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4104 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4105 default: abort();
4106 }
4107}
4108
39d5492a 4109static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4110{
4111 switch (size) {
02da0b2d
PM
4112 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4113 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4114 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4115 default: abort();
4116 }
4117}
4118
39d5492a 4119static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4120 int q, int u)
4121{
4122 if (q) {
4123 if (u) {
4124 switch (size) {
4125 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4126 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4127 default: abort();
4128 }
4129 } else {
4130 switch (size) {
4131 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4132 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4133 default: abort();
4134 }
4135 }
4136 } else {
4137 if (u) {
4138 switch (size) {
b408a9b0
CL
4139 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4140 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4141 default: abort();
4142 }
4143 } else {
4144 switch (size) {
4145 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4146 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4147 default: abort();
4148 }
4149 }
4150 }
4151}
4152
39d5492a 4153static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4154{
4155 if (u) {
4156 switch (size) {
4157 case 0: gen_helper_neon_widen_u8(dest, src); break;
4158 case 1: gen_helper_neon_widen_u16(dest, src); break;
4159 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4160 default: abort();
4161 }
4162 } else {
4163 switch (size) {
4164 case 0: gen_helper_neon_widen_s8(dest, src); break;
4165 case 1: gen_helper_neon_widen_s16(dest, src); break;
4166 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4167 default: abort();
4168 }
4169 }
7d1b0095 4170 tcg_temp_free_i32(src);
ad69471c
PB
4171}
4172
4173static inline void gen_neon_addl(int size)
4174{
4175 switch (size) {
4176 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4177 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4178 case 2: tcg_gen_add_i64(CPU_V001); break;
4179 default: abort();
4180 }
4181}
4182
4183static inline void gen_neon_subl(int size)
4184{
4185 switch (size) {
4186 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4187 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4188 case 2: tcg_gen_sub_i64(CPU_V001); break;
4189 default: abort();
4190 }
4191}
4192
a7812ae4 4193static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4194{
4195 switch (size) {
4196 case 0: gen_helper_neon_negl_u16(var, var); break;
4197 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4198 case 2:
4199 tcg_gen_neg_i64(var, var);
4200 break;
ad69471c
PB
4201 default: abort();
4202 }
4203}
4204
a7812ae4 4205static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4206{
4207 switch (size) {
02da0b2d
PM
4208 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4209 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4210 default: abort();
4211 }
4212}
4213
39d5492a
PM
4214static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4215 int size, int u)
ad69471c 4216{
a7812ae4 4217 TCGv_i64 tmp;
ad69471c
PB
4218
4219 switch ((size << 1) | u) {
4220 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4221 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4222 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4223 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4224 case 4:
4225 tmp = gen_muls_i64_i32(a, b);
4226 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4227 tcg_temp_free_i64(tmp);
ad69471c
PB
4228 break;
4229 case 5:
4230 tmp = gen_mulu_i64_i32(a, b);
4231 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4232 tcg_temp_free_i64(tmp);
ad69471c
PB
4233 break;
4234 default: abort();
4235 }
c6067f04
CL
4236
4237 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4238 Don't forget to clean them now. */
4239 if (size < 2) {
7d1b0095
PM
4240 tcg_temp_free_i32(a);
4241 tcg_temp_free_i32(b);
c6067f04 4242 }
ad69471c
PB
4243}
4244
39d5492a
PM
4245static void gen_neon_narrow_op(int op, int u, int size,
4246 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4247{
4248 if (op) {
4249 if (u) {
4250 gen_neon_unarrow_sats(size, dest, src);
4251 } else {
4252 gen_neon_narrow(size, dest, src);
4253 }
4254 } else {
4255 if (u) {
4256 gen_neon_narrow_satu(size, dest, src);
4257 } else {
4258 gen_neon_narrow_sats(size, dest, src);
4259 }
4260 }
4261}
4262
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Bit n of each entry is set if the insn allows size value n; an op with
 * no bits set (including every unallocated op, e.g. 24) always UNDEFs.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4332
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

/* Return true if this neon 2reg-misc op takes float inputs and produces
 * float outputs (VABS.F, VNEG.F, and every op from VRECPE.F upwards).
 */
static int neon_2rm_is_float_op(int op)
{
    if (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F) {
        return 1;
    }
    return op >= NEON_2RM_VRECPE_F;
}

/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4439
9ee6e8bb
PB
4440/* Translate a NEON data processing instruction. Return nonzero if the
4441 instruction is invalid.
ad69471c
PB
4442 We process data in a mixture of 32-bit and 64-bit chunks.
4443 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4444
0ecb72a5 4445static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4446{
4447 int op;
4448 int q;
4449 int rd, rn, rm;
4450 int size;
4451 int shift;
4452 int pass;
4453 int count;
4454 int pairwise;
4455 int u;
ca9a32e4 4456 uint32_t imm, mask;
39d5492a 4457 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4458 TCGv_i64 tmp64;
9ee6e8bb 4459
5df8bac1 4460 if (!s->vfp_enabled)
9ee6e8bb
PB
4461 return 1;
4462 q = (insn & (1 << 6)) != 0;
4463 u = (insn >> 24) & 1;
4464 VFP_DREG_D(rd, insn);
4465 VFP_DREG_N(rn, insn);
4466 VFP_DREG_M(rm, insn);
4467 size = (insn >> 20) & 3;
4468 if ((insn & (1 << 23)) == 0) {
4469 /* Three register same length. */
4470 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4471 /* Catch invalid op and bad size combinations: UNDEF */
4472 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4473 return 1;
4474 }
25f84f79
PM
4475 /* All insns of this form UNDEF for either this condition or the
4476 * superset of cases "Q==1"; we catch the latter later.
4477 */
4478 if (q && ((rd | rn | rm) & 1)) {
4479 return 1;
4480 }
62698be3
PM
4481 if (size == 3 && op != NEON_3R_LOGIC) {
4482 /* 64-bit element instructions. */
9ee6e8bb 4483 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4484 neon_load_reg64(cpu_V0, rn + pass);
4485 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4486 switch (op) {
62698be3 4487 case NEON_3R_VQADD:
9ee6e8bb 4488 if (u) {
02da0b2d
PM
4489 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4490 cpu_V0, cpu_V1);
2c0262af 4491 } else {
02da0b2d
PM
4492 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4493 cpu_V0, cpu_V1);
2c0262af 4494 }
9ee6e8bb 4495 break;
62698be3 4496 case NEON_3R_VQSUB:
9ee6e8bb 4497 if (u) {
02da0b2d
PM
4498 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4499 cpu_V0, cpu_V1);
ad69471c 4500 } else {
02da0b2d
PM
4501 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4502 cpu_V0, cpu_V1);
ad69471c
PB
4503 }
4504 break;
62698be3 4505 case NEON_3R_VSHL:
ad69471c
PB
4506 if (u) {
4507 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4508 } else {
4509 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4510 }
4511 break;
62698be3 4512 case NEON_3R_VQSHL:
ad69471c 4513 if (u) {
02da0b2d
PM
4514 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4515 cpu_V1, cpu_V0);
ad69471c 4516 } else {
02da0b2d
PM
4517 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4518 cpu_V1, cpu_V0);
ad69471c
PB
4519 }
4520 break;
62698be3 4521 case NEON_3R_VRSHL:
ad69471c
PB
4522 if (u) {
4523 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4524 } else {
ad69471c
PB
4525 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4526 }
4527 break;
62698be3 4528 case NEON_3R_VQRSHL:
ad69471c 4529 if (u) {
02da0b2d
PM
4530 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4531 cpu_V1, cpu_V0);
ad69471c 4532 } else {
02da0b2d
PM
4533 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4534 cpu_V1, cpu_V0);
1e8d4eec 4535 }
9ee6e8bb 4536 break;
62698be3 4537 case NEON_3R_VADD_VSUB:
9ee6e8bb 4538 if (u) {
ad69471c 4539 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4540 } else {
ad69471c 4541 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4542 }
4543 break;
4544 default:
4545 abort();
2c0262af 4546 }
ad69471c 4547 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4548 }
9ee6e8bb 4549 return 0;
2c0262af 4550 }
25f84f79 4551 pairwise = 0;
9ee6e8bb 4552 switch (op) {
62698be3
PM
4553 case NEON_3R_VSHL:
4554 case NEON_3R_VQSHL:
4555 case NEON_3R_VRSHL:
4556 case NEON_3R_VQRSHL:
9ee6e8bb 4557 {
ad69471c
PB
4558 int rtmp;
4559 /* Shift instruction operands are reversed. */
4560 rtmp = rn;
9ee6e8bb 4561 rn = rm;
ad69471c 4562 rm = rtmp;
9ee6e8bb 4563 }
2c0262af 4564 break;
25f84f79
PM
4565 case NEON_3R_VPADD:
4566 if (u) {
4567 return 1;
4568 }
4569 /* Fall through */
62698be3
PM
4570 case NEON_3R_VPMAX:
4571 case NEON_3R_VPMIN:
9ee6e8bb 4572 pairwise = 1;
2c0262af 4573 break;
25f84f79
PM
4574 case NEON_3R_FLOAT_ARITH:
4575 pairwise = (u && size < 2); /* if VPADD (float) */
4576 break;
4577 case NEON_3R_FLOAT_MINMAX:
4578 pairwise = u; /* if VPMIN/VPMAX (float) */
4579 break;
4580 case NEON_3R_FLOAT_CMP:
4581 if (!u && size) {
4582 /* no encoding for U=0 C=1x */
4583 return 1;
4584 }
4585 break;
4586 case NEON_3R_FLOAT_ACMP:
4587 if (!u) {
4588 return 1;
4589 }
4590 break;
4591 case NEON_3R_VRECPS_VRSQRTS:
4592 if (u) {
4593 return 1;
4594 }
2c0262af 4595 break;
25f84f79
PM
4596 case NEON_3R_VMUL:
4597 if (u && (size != 0)) {
4598 /* UNDEF on invalid size for polynomial subcase */
4599 return 1;
4600 }
2c0262af 4601 break;
da97f52c
PM
4602 case NEON_3R_VFM:
4603 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4604 return 1;
4605 }
4606 break;
9ee6e8bb 4607 default:
2c0262af 4608 break;
9ee6e8bb 4609 }
dd8fbd78 4610
25f84f79
PM
4611 if (pairwise && q) {
4612 /* All the pairwise insns UNDEF if Q is set */
4613 return 1;
4614 }
4615
9ee6e8bb
PB
4616 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4617
4618 if (pairwise) {
4619 /* Pairwise. */
a5a14945
JR
4620 if (pass < 1) {
4621 tmp = neon_load_reg(rn, 0);
4622 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4623 } else {
a5a14945
JR
4624 tmp = neon_load_reg(rm, 0);
4625 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4626 }
4627 } else {
4628 /* Elementwise. */
dd8fbd78
FN
4629 tmp = neon_load_reg(rn, pass);
4630 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4631 }
4632 switch (op) {
62698be3 4633 case NEON_3R_VHADD:
9ee6e8bb
PB
4634 GEN_NEON_INTEGER_OP(hadd);
4635 break;
62698be3 4636 case NEON_3R_VQADD:
02da0b2d 4637 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4638 break;
62698be3 4639 case NEON_3R_VRHADD:
9ee6e8bb 4640 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4641 break;
62698be3 4642 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4643 switch ((u << 2) | size) {
4644 case 0: /* VAND */
dd8fbd78 4645 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4646 break;
4647 case 1: /* BIC */
f669df27 4648 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4649 break;
4650 case 2: /* VORR */
dd8fbd78 4651 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4652 break;
4653 case 3: /* VORN */
f669df27 4654 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4655 break;
4656 case 4: /* VEOR */
dd8fbd78 4657 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4658 break;
4659 case 5: /* VBSL */
dd8fbd78
FN
4660 tmp3 = neon_load_reg(rd, pass);
4661 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4662 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4663 break;
4664 case 6: /* VBIT */
dd8fbd78
FN
4665 tmp3 = neon_load_reg(rd, pass);
4666 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4667 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4668 break;
4669 case 7: /* VBIF */
dd8fbd78
FN
4670 tmp3 = neon_load_reg(rd, pass);
4671 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4672 tcg_temp_free_i32(tmp3);
9ee6e8bb 4673 break;
2c0262af
FB
4674 }
4675 break;
62698be3 4676 case NEON_3R_VHSUB:
9ee6e8bb
PB
4677 GEN_NEON_INTEGER_OP(hsub);
4678 break;
62698be3 4679 case NEON_3R_VQSUB:
02da0b2d 4680 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4681 break;
62698be3 4682 case NEON_3R_VCGT:
9ee6e8bb
PB
4683 GEN_NEON_INTEGER_OP(cgt);
4684 break;
62698be3 4685 case NEON_3R_VCGE:
9ee6e8bb
PB
4686 GEN_NEON_INTEGER_OP(cge);
4687 break;
62698be3 4688 case NEON_3R_VSHL:
ad69471c 4689 GEN_NEON_INTEGER_OP(shl);
2c0262af 4690 break;
62698be3 4691 case NEON_3R_VQSHL:
02da0b2d 4692 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4693 break;
62698be3 4694 case NEON_3R_VRSHL:
ad69471c 4695 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4696 break;
62698be3 4697 case NEON_3R_VQRSHL:
02da0b2d 4698 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4699 break;
62698be3 4700 case NEON_3R_VMAX:
9ee6e8bb
PB
4701 GEN_NEON_INTEGER_OP(max);
4702 break;
62698be3 4703 case NEON_3R_VMIN:
9ee6e8bb
PB
4704 GEN_NEON_INTEGER_OP(min);
4705 break;
62698be3 4706 case NEON_3R_VABD:
9ee6e8bb
PB
4707 GEN_NEON_INTEGER_OP(abd);
4708 break;
62698be3 4709 case NEON_3R_VABA:
9ee6e8bb 4710 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4711 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4712 tmp2 = neon_load_reg(rd, pass);
4713 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4714 break;
62698be3 4715 case NEON_3R_VADD_VSUB:
9ee6e8bb 4716 if (!u) { /* VADD */
62698be3 4717 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4718 } else { /* VSUB */
4719 switch (size) {
dd8fbd78
FN
4720 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4721 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4722 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4723 default: abort();
9ee6e8bb
PB
4724 }
4725 }
4726 break;
62698be3 4727 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4728 if (!u) { /* VTST */
4729 switch (size) {
dd8fbd78
FN
4730 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4731 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4732 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4733 default: abort();
9ee6e8bb
PB
4734 }
4735 } else { /* VCEQ */
4736 switch (size) {
dd8fbd78
FN
4737 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4738 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4739 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4740 default: abort();
9ee6e8bb
PB
4741 }
4742 }
4743 break;
62698be3 4744 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4745 switch (size) {
dd8fbd78
FN
4746 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4747 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4748 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4749 default: abort();
9ee6e8bb 4750 }
7d1b0095 4751 tcg_temp_free_i32(tmp2);
dd8fbd78 4752 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4753 if (u) { /* VMLS */
dd8fbd78 4754 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4755 } else { /* VMLA */
dd8fbd78 4756 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4757 }
4758 break;
62698be3 4759 case NEON_3R_VMUL:
9ee6e8bb 4760 if (u) { /* polynomial */
dd8fbd78 4761 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4762 } else { /* Integer */
4763 switch (size) {
dd8fbd78
FN
4764 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4765 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4766 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4767 default: abort();
9ee6e8bb
PB
4768 }
4769 }
4770 break;
62698be3 4771 case NEON_3R_VPMAX:
9ee6e8bb
PB
4772 GEN_NEON_INTEGER_OP(pmax);
4773 break;
62698be3 4774 case NEON_3R_VPMIN:
9ee6e8bb
PB
4775 GEN_NEON_INTEGER_OP(pmin);
4776 break;
62698be3 4777 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4778 if (!u) { /* VQDMULH */
4779 switch (size) {
02da0b2d
PM
4780 case 1:
4781 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4782 break;
4783 case 2:
4784 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4785 break;
62698be3 4786 default: abort();
9ee6e8bb 4787 }
62698be3 4788 } else { /* VQRDMULH */
9ee6e8bb 4789 switch (size) {
02da0b2d
PM
4790 case 1:
4791 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4792 break;
4793 case 2:
4794 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4795 break;
62698be3 4796 default: abort();
9ee6e8bb
PB
4797 }
4798 }
4799 break;
62698be3 4800 case NEON_3R_VPADD:
9ee6e8bb 4801 switch (size) {
dd8fbd78
FN
4802 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4803 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4804 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4805 default: abort();
9ee6e8bb
PB
4806 }
4807 break;
62698be3 4808 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4809 {
4810 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4811 switch ((u << 2) | size) {
4812 case 0: /* VADD */
aa47cfdd
PM
4813 case 4: /* VPADD */
4814 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4815 break;
4816 case 2: /* VSUB */
aa47cfdd 4817 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4818 break;
4819 case 6: /* VABD */
aa47cfdd 4820 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4821 break;
4822 default:
62698be3 4823 abort();
9ee6e8bb 4824 }
aa47cfdd 4825 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4826 break;
aa47cfdd 4827 }
62698be3 4828 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4829 {
4830 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4831 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4832 if (!u) {
7d1b0095 4833 tcg_temp_free_i32(tmp2);
dd8fbd78 4834 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4835 if (size == 0) {
aa47cfdd 4836 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4837 } else {
aa47cfdd 4838 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4839 }
4840 }
aa47cfdd 4841 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4842 break;
aa47cfdd 4843 }
62698be3 4844 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4845 {
4846 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4847 if (!u) {
aa47cfdd 4848 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4849 } else {
aa47cfdd
PM
4850 if (size == 0) {
4851 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4852 } else {
4853 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4854 }
b5ff1b31 4855 }
aa47cfdd 4856 tcg_temp_free_ptr(fpstatus);
2c0262af 4857 break;
aa47cfdd 4858 }
62698be3 4859 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4860 {
4861 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4862 if (size == 0) {
4863 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4864 } else {
4865 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4866 }
4867 tcg_temp_free_ptr(fpstatus);
2c0262af 4868 break;
aa47cfdd 4869 }
62698be3 4870 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4871 {
4872 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4873 if (size == 0) {
4874 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4875 } else {
4876 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4877 }
4878 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4879 break;
aa47cfdd 4880 }
62698be3 4881 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4882 if (size == 0)
dd8fbd78 4883 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4884 else
dd8fbd78 4885 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4886 break;
da97f52c
PM
4887 case NEON_3R_VFM:
4888 {
4889 /* VFMA, VFMS: fused multiply-add */
4890 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4891 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4892 if (size) {
4893 /* VFMS */
4894 gen_helper_vfp_negs(tmp, tmp);
4895 }
4896 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4897 tcg_temp_free_i32(tmp3);
4898 tcg_temp_free_ptr(fpstatus);
4899 break;
4900 }
9ee6e8bb
PB
4901 default:
4902 abort();
2c0262af 4903 }
7d1b0095 4904 tcg_temp_free_i32(tmp2);
dd8fbd78 4905
9ee6e8bb
PB
4906 /* Save the result. For elementwise operations we can put it
4907 straight into the destination register. For pairwise operations
4908 we have to be careful to avoid clobbering the source operands. */
4909 if (pairwise && rd == rm) {
dd8fbd78 4910 neon_store_scratch(pass, tmp);
9ee6e8bb 4911 } else {
dd8fbd78 4912 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4913 }
4914
4915 } /* for pass */
4916 if (pairwise && rd == rm) {
4917 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4918 tmp = neon_load_scratch(pass);
4919 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4920 }
4921 }
ad69471c 4922 /* End of 3 register same size operations. */
9ee6e8bb
PB
4923 } else if (insn & (1 << 4)) {
4924 if ((insn & 0x00380080) != 0) {
4925 /* Two registers and shift. */
4926 op = (insn >> 8) & 0xf;
4927 if (insn & (1 << 7)) {
cc13115b
PM
4928 /* 64-bit shift. */
4929 if (op > 7) {
4930 return 1;
4931 }
9ee6e8bb
PB
4932 size = 3;
4933 } else {
4934 size = 2;
4935 while ((insn & (1 << (size + 19))) == 0)
4936 size--;
4937 }
4938 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4939 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4940 by immediate using the variable shift operations. */
4941 if (op < 8) {
4942 /* Shift by immediate:
4943 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4944 if (q && ((rd | rm) & 1)) {
4945 return 1;
4946 }
4947 if (!u && (op == 4 || op == 6)) {
4948 return 1;
4949 }
9ee6e8bb
PB
4950 /* Right shifts are encoded as N - shift, where N is the
4951 element size in bits. */
4952 if (op <= 4)
4953 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4954 if (size == 3) {
4955 count = q + 1;
4956 } else {
4957 count = q ? 4: 2;
4958 }
4959 switch (size) {
4960 case 0:
4961 imm = (uint8_t) shift;
4962 imm |= imm << 8;
4963 imm |= imm << 16;
4964 break;
4965 case 1:
4966 imm = (uint16_t) shift;
4967 imm |= imm << 16;
4968 break;
4969 case 2:
4970 case 3:
4971 imm = shift;
4972 break;
4973 default:
4974 abort();
4975 }
4976
4977 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4978 if (size == 3) {
4979 neon_load_reg64(cpu_V0, rm + pass);
4980 tcg_gen_movi_i64(cpu_V1, imm);
4981 switch (op) {
4982 case 0: /* VSHR */
4983 case 1: /* VSRA */
4984 if (u)
4985 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4986 else
ad69471c 4987 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4988 break;
ad69471c
PB
4989 case 2: /* VRSHR */
4990 case 3: /* VRSRA */
4991 if (u)
4992 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4993 else
ad69471c 4994 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4995 break;
ad69471c 4996 case 4: /* VSRI */
ad69471c
PB
4997 case 5: /* VSHL, VSLI */
4998 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4999 break;
0322b26e 5000 case 6: /* VQSHLU */
02da0b2d
PM
5001 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5002 cpu_V0, cpu_V1);
ad69471c 5003 break;
0322b26e
PM
5004 case 7: /* VQSHL */
5005 if (u) {
02da0b2d 5006 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5007 cpu_V0, cpu_V1);
5008 } else {
02da0b2d 5009 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5010 cpu_V0, cpu_V1);
5011 }
9ee6e8bb 5012 break;
9ee6e8bb 5013 }
ad69471c
PB
5014 if (op == 1 || op == 3) {
5015 /* Accumulate. */
5371cb81 5016 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5017 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5018 } else if (op == 4 || (op == 5 && u)) {
5019 /* Insert */
923e6509
CL
5020 neon_load_reg64(cpu_V1, rd + pass);
5021 uint64_t mask;
5022 if (shift < -63 || shift > 63) {
5023 mask = 0;
5024 } else {
5025 if (op == 4) {
5026 mask = 0xffffffffffffffffull >> -shift;
5027 } else {
5028 mask = 0xffffffffffffffffull << shift;
5029 }
5030 }
5031 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5032 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5033 }
5034 neon_store_reg64(cpu_V0, rd + pass);
5035 } else { /* size < 3 */
5036 /* Operands in T0 and T1. */
dd8fbd78 5037 tmp = neon_load_reg(rm, pass);
7d1b0095 5038 tmp2 = tcg_temp_new_i32();
dd8fbd78 5039 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5040 switch (op) {
5041 case 0: /* VSHR */
5042 case 1: /* VSRA */
5043 GEN_NEON_INTEGER_OP(shl);
5044 break;
5045 case 2: /* VRSHR */
5046 case 3: /* VRSRA */
5047 GEN_NEON_INTEGER_OP(rshl);
5048 break;
5049 case 4: /* VSRI */
ad69471c
PB
5050 case 5: /* VSHL, VSLI */
5051 switch (size) {
dd8fbd78
FN
5052 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5053 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5054 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5055 default: abort();
ad69471c
PB
5056 }
5057 break;
0322b26e 5058 case 6: /* VQSHLU */
ad69471c 5059 switch (size) {
0322b26e 5060 case 0:
02da0b2d
PM
5061 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5062 tmp, tmp2);
0322b26e
PM
5063 break;
5064 case 1:
02da0b2d
PM
5065 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5066 tmp, tmp2);
0322b26e
PM
5067 break;
5068 case 2:
02da0b2d
PM
5069 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5070 tmp, tmp2);
0322b26e
PM
5071 break;
5072 default:
cc13115b 5073 abort();
ad69471c
PB
5074 }
5075 break;
0322b26e 5076 case 7: /* VQSHL */
02da0b2d 5077 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5078 break;
ad69471c 5079 }
7d1b0095 5080 tcg_temp_free_i32(tmp2);
ad69471c
PB
5081
5082 if (op == 1 || op == 3) {
5083 /* Accumulate. */
dd8fbd78 5084 tmp2 = neon_load_reg(rd, pass);
5371cb81 5085 gen_neon_add(size, tmp, tmp2);
7d1b0095 5086 tcg_temp_free_i32(tmp2);
ad69471c
PB
5087 } else if (op == 4 || (op == 5 && u)) {
5088 /* Insert */
5089 switch (size) {
5090 case 0:
5091 if (op == 4)
ca9a32e4 5092 mask = 0xff >> -shift;
ad69471c 5093 else
ca9a32e4
JR
5094 mask = (uint8_t)(0xff << shift);
5095 mask |= mask << 8;
5096 mask |= mask << 16;
ad69471c
PB
5097 break;
5098 case 1:
5099 if (op == 4)
ca9a32e4 5100 mask = 0xffff >> -shift;
ad69471c 5101 else
ca9a32e4
JR
5102 mask = (uint16_t)(0xffff << shift);
5103 mask |= mask << 16;
ad69471c
PB
5104 break;
5105 case 2:
ca9a32e4
JR
5106 if (shift < -31 || shift > 31) {
5107 mask = 0;
5108 } else {
5109 if (op == 4)
5110 mask = 0xffffffffu >> -shift;
5111 else
5112 mask = 0xffffffffu << shift;
5113 }
ad69471c
PB
5114 break;
5115 default:
5116 abort();
5117 }
dd8fbd78 5118 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5119 tcg_gen_andi_i32(tmp, tmp, mask);
5120 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5121 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5122 tcg_temp_free_i32(tmp2);
ad69471c 5123 }
dd8fbd78 5124 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5125 }
5126 } /* for pass */
5127 } else if (op < 10) {
ad69471c 5128 /* Shift by immediate and narrow:
9ee6e8bb 5129 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5130 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5131 if (rm & 1) {
5132 return 1;
5133 }
9ee6e8bb
PB
5134 shift = shift - (1 << (size + 3));
5135 size++;
92cdfaeb 5136 if (size == 3) {
a7812ae4 5137 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5138 neon_load_reg64(cpu_V0, rm);
5139 neon_load_reg64(cpu_V1, rm + 1);
5140 for (pass = 0; pass < 2; pass++) {
5141 TCGv_i64 in;
5142 if (pass == 0) {
5143 in = cpu_V0;
5144 } else {
5145 in = cpu_V1;
5146 }
ad69471c 5147 if (q) {
0b36f4cd 5148 if (input_unsigned) {
92cdfaeb 5149 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5150 } else {
92cdfaeb 5151 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5152 }
ad69471c 5153 } else {
0b36f4cd 5154 if (input_unsigned) {
92cdfaeb 5155 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5156 } else {
92cdfaeb 5157 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5158 }
ad69471c 5159 }
7d1b0095 5160 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5161 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5162 neon_store_reg(rd, pass, tmp);
5163 } /* for pass */
5164 tcg_temp_free_i64(tmp64);
5165 } else {
5166 if (size == 1) {
5167 imm = (uint16_t)shift;
5168 imm |= imm << 16;
2c0262af 5169 } else {
92cdfaeb
PM
5170 /* size == 2 */
5171 imm = (uint32_t)shift;
5172 }
5173 tmp2 = tcg_const_i32(imm);
5174 tmp4 = neon_load_reg(rm + 1, 0);
5175 tmp5 = neon_load_reg(rm + 1, 1);
5176 for (pass = 0; pass < 2; pass++) {
5177 if (pass == 0) {
5178 tmp = neon_load_reg(rm, 0);
5179 } else {
5180 tmp = tmp4;
5181 }
0b36f4cd
CL
5182 gen_neon_shift_narrow(size, tmp, tmp2, q,
5183 input_unsigned);
92cdfaeb
PM
5184 if (pass == 0) {
5185 tmp3 = neon_load_reg(rm, 1);
5186 } else {
5187 tmp3 = tmp5;
5188 }
0b36f4cd
CL
5189 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5190 input_unsigned);
36aa55dc 5191 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5192 tcg_temp_free_i32(tmp);
5193 tcg_temp_free_i32(tmp3);
5194 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5195 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5196 neon_store_reg(rd, pass, tmp);
5197 } /* for pass */
c6067f04 5198 tcg_temp_free_i32(tmp2);
b75263d6 5199 }
9ee6e8bb 5200 } else if (op == 10) {
cc13115b
PM
5201 /* VSHLL, VMOVL */
5202 if (q || (rd & 1)) {
9ee6e8bb 5203 return 1;
cc13115b 5204 }
ad69471c
PB
5205 tmp = neon_load_reg(rm, 0);
5206 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5207 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5208 if (pass == 1)
5209 tmp = tmp2;
5210
5211 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5212
9ee6e8bb
PB
5213 if (shift != 0) {
5214 /* The shift is less than the width of the source
ad69471c
PB
5215 type, so we can just shift the whole register. */
5216 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5217 /* Widen the result of shift: we need to clear
5218 * the potential overflow bits resulting from
5219 * left bits of the narrow input appearing as
5220 * right bits of left the neighbour narrow
5221 * input. */
ad69471c
PB
5222 if (size < 2 || !u) {
5223 uint64_t imm64;
5224 if (size == 0) {
5225 imm = (0xffu >> (8 - shift));
5226 imm |= imm << 16;
acdf01ef 5227 } else if (size == 1) {
ad69471c 5228 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5229 } else {
5230 /* size == 2 */
5231 imm = 0xffffffff >> (32 - shift);
5232 }
5233 if (size < 2) {
5234 imm64 = imm | (((uint64_t)imm) << 32);
5235 } else {
5236 imm64 = imm;
9ee6e8bb 5237 }
acdf01ef 5238 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5239 }
5240 }
ad69471c 5241 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5242 }
f73534a5 5243 } else if (op >= 14) {
9ee6e8bb 5244 /* VCVT fixed-point. */
cc13115b
PM
5245 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5246 return 1;
5247 }
f73534a5
PM
5248 /* We have already masked out the must-be-1 top bit of imm6,
5249 * hence this 32-shift where the ARM ARM has 64-imm6.
5250 */
5251 shift = 32 - shift;
9ee6e8bb 5252 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5253 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5254 if (!(op & 1)) {
9ee6e8bb 5255 if (u)
5500b06c 5256 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5257 else
5500b06c 5258 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5259 } else {
5260 if (u)
5500b06c 5261 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5262 else
5500b06c 5263 gen_vfp_tosl(0, shift, 1);
2c0262af 5264 }
4373f3ce 5265 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5266 }
5267 } else {
9ee6e8bb
PB
5268 return 1;
5269 }
5270 } else { /* (insn & 0x00380080) == 0 */
5271 int invert;
7d80fee5
PM
5272 if (q && (rd & 1)) {
5273 return 1;
5274 }
9ee6e8bb
PB
5275
5276 op = (insn >> 8) & 0xf;
5277 /* One register and immediate. */
5278 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5279 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5280 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5281 * We choose to not special-case this and will behave as if a
5282 * valid constant encoding of 0 had been given.
5283 */
9ee6e8bb
PB
5284 switch (op) {
5285 case 0: case 1:
5286 /* no-op */
5287 break;
5288 case 2: case 3:
5289 imm <<= 8;
5290 break;
5291 case 4: case 5:
5292 imm <<= 16;
5293 break;
5294 case 6: case 7:
5295 imm <<= 24;
5296 break;
5297 case 8: case 9:
5298 imm |= imm << 16;
5299 break;
5300 case 10: case 11:
5301 imm = (imm << 8) | (imm << 24);
5302 break;
5303 case 12:
8e31209e 5304 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5305 break;
5306 case 13:
5307 imm = (imm << 16) | 0xffff;
5308 break;
5309 case 14:
5310 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5311 if (invert)
5312 imm = ~imm;
5313 break;
5314 case 15:
7d80fee5
PM
5315 if (invert) {
5316 return 1;
5317 }
9ee6e8bb
PB
5318 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5319 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5320 break;
5321 }
5322 if (invert)
5323 imm = ~imm;
5324
9ee6e8bb
PB
5325 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5326 if (op & 1 && op < 12) {
ad69471c 5327 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5328 if (invert) {
5329 /* The immediate value has already been inverted, so
5330 BIC becomes AND. */
ad69471c 5331 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5332 } else {
ad69471c 5333 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5334 }
9ee6e8bb 5335 } else {
ad69471c 5336 /* VMOV, VMVN. */
7d1b0095 5337 tmp = tcg_temp_new_i32();
9ee6e8bb 5338 if (op == 14 && invert) {
a5a14945 5339 int n;
ad69471c
PB
5340 uint32_t val;
5341 val = 0;
9ee6e8bb
PB
5342 for (n = 0; n < 4; n++) {
5343 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5344 val |= 0xff << (n * 8);
9ee6e8bb 5345 }
ad69471c
PB
5346 tcg_gen_movi_i32(tmp, val);
5347 } else {
5348 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5349 }
9ee6e8bb 5350 }
ad69471c 5351 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5352 }
5353 }
e4b3861d 5354 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5355 if (size != 3) {
5356 op = (insn >> 8) & 0xf;
5357 if ((insn & (1 << 6)) == 0) {
5358 /* Three registers of different lengths. */
5359 int src1_wide;
5360 int src2_wide;
5361 int prewiden;
695272dc
PM
5362 /* undefreq: bit 0 : UNDEF if size != 0
5363 * bit 1 : UNDEF if size == 0
5364 * bit 2 : UNDEF if U == 1
5365 * Note that [1:0] set implies 'always UNDEF'
5366 */
5367 int undefreq;
5368 /* prewiden, src1_wide, src2_wide, undefreq */
5369 static const int neon_3reg_wide[16][4] = {
5370 {1, 0, 0, 0}, /* VADDL */
5371 {1, 1, 0, 0}, /* VADDW */
5372 {1, 0, 0, 0}, /* VSUBL */
5373 {1, 1, 0, 0}, /* VSUBW */
5374 {0, 1, 1, 0}, /* VADDHN */
5375 {0, 0, 0, 0}, /* VABAL */
5376 {0, 1, 1, 0}, /* VSUBHN */
5377 {0, 0, 0, 0}, /* VABDL */
5378 {0, 0, 0, 0}, /* VMLAL */
5379 {0, 0, 0, 6}, /* VQDMLAL */
5380 {0, 0, 0, 0}, /* VMLSL */
5381 {0, 0, 0, 6}, /* VQDMLSL */
5382 {0, 0, 0, 0}, /* Integer VMULL */
5383 {0, 0, 0, 2}, /* VQDMULL */
5384 {0, 0, 0, 5}, /* Polynomial VMULL */
5385 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5386 };
5387
5388 prewiden = neon_3reg_wide[op][0];
5389 src1_wide = neon_3reg_wide[op][1];
5390 src2_wide = neon_3reg_wide[op][2];
695272dc 5391 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5392
695272dc
PM
5393 if (((undefreq & 1) && (size != 0)) ||
5394 ((undefreq & 2) && (size == 0)) ||
5395 ((undefreq & 4) && u)) {
5396 return 1;
5397 }
5398 if ((src1_wide && (rn & 1)) ||
5399 (src2_wide && (rm & 1)) ||
5400 (!src2_wide && (rd & 1))) {
ad69471c 5401 return 1;
695272dc 5402 }
ad69471c 5403
9ee6e8bb
PB
5404 /* Avoid overlapping operands. Wide source operands are
5405 always aligned so will never overlap with wide
5406 destinations in problematic ways. */
8f8e3aa4 5407 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5408 tmp = neon_load_reg(rm, 1);
5409 neon_store_scratch(2, tmp);
8f8e3aa4 5410 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5411 tmp = neon_load_reg(rn, 1);
5412 neon_store_scratch(2, tmp);
9ee6e8bb 5413 }
39d5492a 5414 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5415 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5416 if (src1_wide) {
5417 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5418 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5419 } else {
ad69471c 5420 if (pass == 1 && rd == rn) {
dd8fbd78 5421 tmp = neon_load_scratch(2);
9ee6e8bb 5422 } else {
ad69471c
PB
5423 tmp = neon_load_reg(rn, pass);
5424 }
5425 if (prewiden) {
5426 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5427 }
5428 }
ad69471c
PB
5429 if (src2_wide) {
5430 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5431 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5432 } else {
ad69471c 5433 if (pass == 1 && rd == rm) {
dd8fbd78 5434 tmp2 = neon_load_scratch(2);
9ee6e8bb 5435 } else {
ad69471c
PB
5436 tmp2 = neon_load_reg(rm, pass);
5437 }
5438 if (prewiden) {
5439 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5440 }
9ee6e8bb
PB
5441 }
5442 switch (op) {
5443 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5444 gen_neon_addl(size);
9ee6e8bb 5445 break;
79b0e534 5446 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5447 gen_neon_subl(size);
9ee6e8bb
PB
5448 break;
5449 case 5: case 7: /* VABAL, VABDL */
5450 switch ((size << 1) | u) {
ad69471c
PB
5451 case 0:
5452 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5453 break;
5454 case 1:
5455 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5456 break;
5457 case 2:
5458 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5459 break;
5460 case 3:
5461 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5462 break;
5463 case 4:
5464 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5465 break;
5466 case 5:
5467 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5468 break;
9ee6e8bb
PB
5469 default: abort();
5470 }
7d1b0095
PM
5471 tcg_temp_free_i32(tmp2);
5472 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5473 break;
5474 case 8: case 9: case 10: case 11: case 12: case 13:
5475 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5476 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5477 break;
5478 case 14: /* Polynomial VMULL */
e5ca24cb 5479 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5480 tcg_temp_free_i32(tmp2);
5481 tcg_temp_free_i32(tmp);
e5ca24cb 5482 break;
695272dc
PM
5483 default: /* 15 is RESERVED: caught earlier */
5484 abort();
9ee6e8bb 5485 }
ebcd88ce
PM
5486 if (op == 13) {
5487 /* VQDMULL */
5488 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5489 neon_store_reg64(cpu_V0, rd + pass);
5490 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5491 /* Accumulate. */
ebcd88ce 5492 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5493 switch (op) {
4dc064e6
PM
5494 case 10: /* VMLSL */
5495 gen_neon_negl(cpu_V0, size);
5496 /* Fall through */
5497 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5498 gen_neon_addl(size);
9ee6e8bb
PB
5499 break;
5500 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5501 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5502 if (op == 11) {
5503 gen_neon_negl(cpu_V0, size);
5504 }
ad69471c
PB
5505 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5506 break;
9ee6e8bb
PB
5507 default:
5508 abort();
5509 }
ad69471c 5510 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5511 } else if (op == 4 || op == 6) {
5512 /* Narrowing operation. */
7d1b0095 5513 tmp = tcg_temp_new_i32();
79b0e534 5514 if (!u) {
9ee6e8bb 5515 switch (size) {
ad69471c
PB
5516 case 0:
5517 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5518 break;
5519 case 1:
5520 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5521 break;
5522 case 2:
5523 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5524 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5525 break;
9ee6e8bb
PB
5526 default: abort();
5527 }
5528 } else {
5529 switch (size) {
ad69471c
PB
5530 case 0:
5531 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5532 break;
5533 case 1:
5534 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5535 break;
5536 case 2:
5537 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5538 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5539 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5540 break;
9ee6e8bb
PB
5541 default: abort();
5542 }
5543 }
ad69471c
PB
5544 if (pass == 0) {
5545 tmp3 = tmp;
5546 } else {
5547 neon_store_reg(rd, 0, tmp3);
5548 neon_store_reg(rd, 1, tmp);
5549 }
9ee6e8bb
PB
5550 } else {
5551 /* Write back the result. */
ad69471c 5552 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5553 }
5554 }
5555 } else {
3e3326df
PM
5556 /* Two registers and a scalar. NB that for ops of this form
5557 * the ARM ARM labels bit 24 as Q, but it is in our variable
5558 * 'u', not 'q'.
5559 */
5560 if (size == 0) {
5561 return 1;
5562 }
9ee6e8bb 5563 switch (op) {
9ee6e8bb 5564 case 1: /* Float VMLA scalar */
9ee6e8bb 5565 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5566 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5567 if (size == 1) {
5568 return 1;
5569 }
5570 /* fall through */
5571 case 0: /* Integer VMLA scalar */
5572 case 4: /* Integer VMLS scalar */
5573 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5574 case 12: /* VQDMULH scalar */
5575 case 13: /* VQRDMULH scalar */
3e3326df
PM
5576 if (u && ((rd | rn) & 1)) {
5577 return 1;
5578 }
dd8fbd78
FN
5579 tmp = neon_get_scalar(size, rm);
5580 neon_store_scratch(0, tmp);
9ee6e8bb 5581 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5582 tmp = neon_load_scratch(0);
5583 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5584 if (op == 12) {
5585 if (size == 1) {
02da0b2d 5586 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5587 } else {
02da0b2d 5588 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5589 }
5590 } else if (op == 13) {
5591 if (size == 1) {
02da0b2d 5592 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5593 } else {
02da0b2d 5594 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5595 }
5596 } else if (op & 1) {
aa47cfdd
PM
5597 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5598 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5599 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5600 } else {
5601 switch (size) {
dd8fbd78
FN
5602 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5603 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5604 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5605 default: abort();
9ee6e8bb
PB
5606 }
5607 }
7d1b0095 5608 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5609 if (op < 8) {
5610 /* Accumulate. */
dd8fbd78 5611 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5612 switch (op) {
5613 case 0:
dd8fbd78 5614 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5615 break;
5616 case 1:
aa47cfdd
PM
5617 {
5618 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5619 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5620 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5621 break;
aa47cfdd 5622 }
9ee6e8bb 5623 case 4:
dd8fbd78 5624 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5625 break;
5626 case 5:
aa47cfdd
PM
5627 {
5628 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5629 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5630 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5631 break;
aa47cfdd 5632 }
9ee6e8bb
PB
5633 default:
5634 abort();
5635 }
7d1b0095 5636 tcg_temp_free_i32(tmp2);
9ee6e8bb 5637 }
dd8fbd78 5638 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5639 }
5640 break;
9ee6e8bb 5641 case 3: /* VQDMLAL scalar */
9ee6e8bb 5642 case 7: /* VQDMLSL scalar */
9ee6e8bb 5643 case 11: /* VQDMULL scalar */
3e3326df 5644 if (u == 1) {
ad69471c 5645 return 1;
3e3326df
PM
5646 }
5647 /* fall through */
5648 case 2: /* VMLAL sclar */
5649 case 6: /* VMLSL scalar */
5650 case 10: /* VMULL scalar */
5651 if (rd & 1) {
5652 return 1;
5653 }
dd8fbd78 5654 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5655 /* We need a copy of tmp2 because gen_neon_mull
5656 * deletes it during pass 0. */
7d1b0095 5657 tmp4 = tcg_temp_new_i32();
c6067f04 5658 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5659 tmp3 = neon_load_reg(rn, 1);
ad69471c 5660
9ee6e8bb 5661 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5662 if (pass == 0) {
5663 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5664 } else {
dd8fbd78 5665 tmp = tmp3;
c6067f04 5666 tmp2 = tmp4;
9ee6e8bb 5667 }
ad69471c 5668 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5669 if (op != 11) {
5670 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5671 }
9ee6e8bb 5672 switch (op) {
4dc064e6
PM
5673 case 6:
5674 gen_neon_negl(cpu_V0, size);
5675 /* Fall through */
5676 case 2:
ad69471c 5677 gen_neon_addl(size);
9ee6e8bb
PB
5678 break;
5679 case 3: case 7:
ad69471c 5680 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5681 if (op == 7) {
5682 gen_neon_negl(cpu_V0, size);
5683 }
ad69471c 5684 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5685 break;
5686 case 10:
5687 /* no-op */
5688 break;
5689 case 11:
ad69471c 5690 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5691 break;
5692 default:
5693 abort();
5694 }
ad69471c 5695 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5696 }
dd8fbd78 5697
dd8fbd78 5698
9ee6e8bb
PB
5699 break;
5700 default: /* 14 and 15 are RESERVED */
5701 return 1;
5702 }
5703 }
5704 } else { /* size == 3 */
5705 if (!u) {
5706 /* Extract. */
9ee6e8bb 5707 imm = (insn >> 8) & 0xf;
ad69471c
PB
5708
5709 if (imm > 7 && !q)
5710 return 1;
5711
52579ea1
PM
5712 if (q && ((rd | rn | rm) & 1)) {
5713 return 1;
5714 }
5715
ad69471c
PB
5716 if (imm == 0) {
5717 neon_load_reg64(cpu_V0, rn);
5718 if (q) {
5719 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5720 }
ad69471c
PB
5721 } else if (imm == 8) {
5722 neon_load_reg64(cpu_V0, rn + 1);
5723 if (q) {
5724 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5725 }
ad69471c 5726 } else if (q) {
a7812ae4 5727 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5728 if (imm < 8) {
5729 neon_load_reg64(cpu_V0, rn);
a7812ae4 5730 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5731 } else {
5732 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5733 neon_load_reg64(tmp64, rm);
ad69471c
PB
5734 }
5735 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5736 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5737 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5738 if (imm < 8) {
5739 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5740 } else {
ad69471c
PB
5741 neon_load_reg64(cpu_V1, rm + 1);
5742 imm -= 8;
9ee6e8bb 5743 }
ad69471c 5744 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5745 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5746 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5747 tcg_temp_free_i64(tmp64);
ad69471c 5748 } else {
a7812ae4 5749 /* BUGFIX */
ad69471c 5750 neon_load_reg64(cpu_V0, rn);
a7812ae4 5751 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5752 neon_load_reg64(cpu_V1, rm);
a7812ae4 5753 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5754 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5755 }
5756 neon_store_reg64(cpu_V0, rd);
5757 if (q) {
5758 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5759 }
5760 } else if ((insn & (1 << 11)) == 0) {
5761 /* Two register misc. */
5762 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5763 size = (insn >> 18) & 3;
600b828c
PM
5764 /* UNDEF for unknown op values and bad op-size combinations */
5765 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5766 return 1;
5767 }
fc2a9b37
PM
5768 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5769 q && ((rm | rd) & 1)) {
5770 return 1;
5771 }
9ee6e8bb 5772 switch (op) {
600b828c 5773 case NEON_2RM_VREV64:
9ee6e8bb 5774 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5775 tmp = neon_load_reg(rm, pass * 2);
5776 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5777 switch (size) {
dd8fbd78
FN
5778 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5779 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5780 case 2: /* no-op */ break;
5781 default: abort();
5782 }
dd8fbd78 5783 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5784 if (size == 2) {
dd8fbd78 5785 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5786 } else {
9ee6e8bb 5787 switch (size) {
dd8fbd78
FN
5788 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5789 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5790 default: abort();
5791 }
dd8fbd78 5792 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5793 }
5794 }
5795 break;
600b828c
PM
5796 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5797 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5798 for (pass = 0; pass < q + 1; pass++) {
5799 tmp = neon_load_reg(rm, pass * 2);
5800 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5801 tmp = neon_load_reg(rm, pass * 2 + 1);
5802 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5803 switch (size) {
5804 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5805 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5806 case 2: tcg_gen_add_i64(CPU_V001); break;
5807 default: abort();
5808 }
600b828c 5809 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5810 /* Accumulate. */
ad69471c
PB
5811 neon_load_reg64(cpu_V1, rd + pass);
5812 gen_neon_addl(size);
9ee6e8bb 5813 }
ad69471c 5814 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5815 }
5816 break;
600b828c 5817 case NEON_2RM_VTRN:
9ee6e8bb 5818 if (size == 2) {
a5a14945 5819 int n;
9ee6e8bb 5820 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5821 tmp = neon_load_reg(rm, n);
5822 tmp2 = neon_load_reg(rd, n + 1);
5823 neon_store_reg(rm, n, tmp2);
5824 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5825 }
5826 } else {
5827 goto elementwise;
5828 }
5829 break;
600b828c 5830 case NEON_2RM_VUZP:
02acedf9 5831 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5832 return 1;
9ee6e8bb
PB
5833 }
5834 break;
600b828c 5835 case NEON_2RM_VZIP:
d68a6f3a 5836 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5837 return 1;
9ee6e8bb
PB
5838 }
5839 break;
600b828c
PM
5840 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5841 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5842 if (rm & 1) {
5843 return 1;
5844 }
39d5492a 5845 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5846 for (pass = 0; pass < 2; pass++) {
ad69471c 5847 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5848 tmp = tcg_temp_new_i32();
600b828c
PM
5849 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5850 tmp, cpu_V0);
ad69471c
PB
5851 if (pass == 0) {
5852 tmp2 = tmp;
5853 } else {
5854 neon_store_reg(rd, 0, tmp2);
5855 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5856 }
9ee6e8bb
PB
5857 }
5858 break;
600b828c 5859 case NEON_2RM_VSHLL:
fc2a9b37 5860 if (q || (rd & 1)) {
9ee6e8bb 5861 return 1;
600b828c 5862 }
ad69471c
PB
5863 tmp = neon_load_reg(rm, 0);
5864 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5865 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5866 if (pass == 1)
5867 tmp = tmp2;
5868 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5869 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5870 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5871 }
5872 break;
600b828c 5873 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5874 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5875 q || (rm & 1)) {
5876 return 1;
5877 }
7d1b0095
PM
5878 tmp = tcg_temp_new_i32();
5879 tmp2 = tcg_temp_new_i32();
60011498 5880 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5881 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5882 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5883 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5884 tcg_gen_shli_i32(tmp2, tmp2, 16);
5885 tcg_gen_or_i32(tmp2, tmp2, tmp);
5886 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5887 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5888 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5889 neon_store_reg(rd, 0, tmp2);
7d1b0095 5890 tmp2 = tcg_temp_new_i32();
2d981da7 5891 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5892 tcg_gen_shli_i32(tmp2, tmp2, 16);
5893 tcg_gen_or_i32(tmp2, tmp2, tmp);
5894 neon_store_reg(rd, 1, tmp2);
7d1b0095 5895 tcg_temp_free_i32(tmp);
60011498 5896 break;
600b828c 5897 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5898 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5899 q || (rd & 1)) {
5900 return 1;
5901 }
7d1b0095 5902 tmp3 = tcg_temp_new_i32();
60011498
PB
5903 tmp = neon_load_reg(rm, 0);
5904 tmp2 = neon_load_reg(rm, 1);
5905 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5906 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5907 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5908 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5909 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5910 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5911 tcg_temp_free_i32(tmp);
60011498 5912 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5913 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5914 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5915 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5916 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5917 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5918 tcg_temp_free_i32(tmp2);
5919 tcg_temp_free_i32(tmp3);
60011498 5920 break;
9ee6e8bb
PB
5921 default:
5922 elementwise:
5923 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5924 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5925 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5926 neon_reg_offset(rm, pass));
39d5492a 5927 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5928 } else {
dd8fbd78 5929 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5930 }
5931 switch (op) {
600b828c 5932 case NEON_2RM_VREV32:
9ee6e8bb 5933 switch (size) {
dd8fbd78
FN
5934 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5935 case 1: gen_swap_half(tmp); break;
600b828c 5936 default: abort();
9ee6e8bb
PB
5937 }
5938 break;
600b828c 5939 case NEON_2RM_VREV16:
dd8fbd78 5940 gen_rev16(tmp);
9ee6e8bb 5941 break;
600b828c 5942 case NEON_2RM_VCLS:
9ee6e8bb 5943 switch (size) {
dd8fbd78
FN
5944 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5945 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5946 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5947 default: abort();
9ee6e8bb
PB
5948 }
5949 break;
600b828c 5950 case NEON_2RM_VCLZ:
9ee6e8bb 5951 switch (size) {
dd8fbd78
FN
5952 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5953 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5954 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5955 default: abort();
9ee6e8bb
PB
5956 }
5957 break;
600b828c 5958 case NEON_2RM_VCNT:
dd8fbd78 5959 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 5960 break;
600b828c 5961 case NEON_2RM_VMVN:
dd8fbd78 5962 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 5963 break;
600b828c 5964 case NEON_2RM_VQABS:
9ee6e8bb 5965 switch (size) {
02da0b2d
PM
5966 case 0:
5967 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
5968 break;
5969 case 1:
5970 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
5971 break;
5972 case 2:
5973 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
5974 break;
600b828c 5975 default: abort();
9ee6e8bb
PB
5976 }
5977 break;
600b828c 5978 case NEON_2RM_VQNEG:
9ee6e8bb 5979 switch (size) {
02da0b2d
PM
5980 case 0:
5981 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
5982 break;
5983 case 1:
5984 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
5985 break;
5986 case 2:
5987 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
5988 break;
600b828c 5989 default: abort();
9ee6e8bb
PB
5990 }
5991 break;
600b828c 5992 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 5993 tmp2 = tcg_const_i32(0);
9ee6e8bb 5994 switch(size) {
dd8fbd78
FN
5995 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5996 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5997 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 5998 default: abort();
9ee6e8bb 5999 }
39d5492a 6000 tcg_temp_free_i32(tmp2);
600b828c 6001 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6002 tcg_gen_not_i32(tmp, tmp);
600b828c 6003 }
9ee6e8bb 6004 break;
600b828c 6005 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6006 tmp2 = tcg_const_i32(0);
9ee6e8bb 6007 switch(size) {
dd8fbd78
FN
6008 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6009 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6010 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6011 default: abort();
9ee6e8bb 6012 }
39d5492a 6013 tcg_temp_free_i32(tmp2);
600b828c 6014 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6015 tcg_gen_not_i32(tmp, tmp);
600b828c 6016 }
9ee6e8bb 6017 break;
600b828c 6018 case NEON_2RM_VCEQ0:
dd8fbd78 6019 tmp2 = tcg_const_i32(0);
9ee6e8bb 6020 switch(size) {
dd8fbd78
FN
6021 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6022 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6023 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6024 default: abort();
9ee6e8bb 6025 }
39d5492a 6026 tcg_temp_free_i32(tmp2);
9ee6e8bb 6027 break;
600b828c 6028 case NEON_2RM_VABS:
9ee6e8bb 6029 switch(size) {
dd8fbd78
FN
6030 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6031 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6032 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6033 default: abort();
9ee6e8bb
PB
6034 }
6035 break;
600b828c 6036 case NEON_2RM_VNEG:
dd8fbd78
FN
6037 tmp2 = tcg_const_i32(0);
6038 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6039 tcg_temp_free_i32(tmp2);
9ee6e8bb 6040 break;
600b828c 6041 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6042 {
6043 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6044 tmp2 = tcg_const_i32(0);
aa47cfdd 6045 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6046 tcg_temp_free_i32(tmp2);
aa47cfdd 6047 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6048 break;
aa47cfdd 6049 }
600b828c 6050 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6051 {
6052 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6053 tmp2 = tcg_const_i32(0);
aa47cfdd 6054 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6055 tcg_temp_free_i32(tmp2);
aa47cfdd 6056 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6057 break;
aa47cfdd 6058 }
600b828c 6059 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6060 {
6061 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6062 tmp2 = tcg_const_i32(0);
aa47cfdd 6063 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6064 tcg_temp_free_i32(tmp2);
aa47cfdd 6065 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6066 break;
aa47cfdd 6067 }
600b828c 6068 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6069 {
6070 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6071 tmp2 = tcg_const_i32(0);
aa47cfdd 6072 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6073 tcg_temp_free_i32(tmp2);
aa47cfdd 6074 tcg_temp_free_ptr(fpstatus);
0e326109 6075 break;
aa47cfdd 6076 }
600b828c 6077 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6078 {
6079 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6080 tmp2 = tcg_const_i32(0);
aa47cfdd 6081 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6082 tcg_temp_free_i32(tmp2);
aa47cfdd 6083 tcg_temp_free_ptr(fpstatus);
0e326109 6084 break;
aa47cfdd 6085 }
600b828c 6086 case NEON_2RM_VABS_F:
4373f3ce 6087 gen_vfp_abs(0);
9ee6e8bb 6088 break;
600b828c 6089 case NEON_2RM_VNEG_F:
4373f3ce 6090 gen_vfp_neg(0);
9ee6e8bb 6091 break;
600b828c 6092 case NEON_2RM_VSWP:
dd8fbd78
FN
6093 tmp2 = neon_load_reg(rd, pass);
6094 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6095 break;
600b828c 6096 case NEON_2RM_VTRN:
dd8fbd78 6097 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6098 switch (size) {
dd8fbd78
FN
6099 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6100 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6101 default: abort();
9ee6e8bb 6102 }
dd8fbd78 6103 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6104 break;
600b828c 6105 case NEON_2RM_VRECPE:
dd8fbd78 6106 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6107 break;
600b828c 6108 case NEON_2RM_VRSQRTE:
dd8fbd78 6109 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6110 break;
600b828c 6111 case NEON_2RM_VRECPE_F:
4373f3ce 6112 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6113 break;
600b828c 6114 case NEON_2RM_VRSQRTE_F:
4373f3ce 6115 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6116 break;
600b828c 6117 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6118 gen_vfp_sito(0, 1);
9ee6e8bb 6119 break;
600b828c 6120 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6121 gen_vfp_uito(0, 1);
9ee6e8bb 6122 break;
600b828c 6123 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6124 gen_vfp_tosiz(0, 1);
9ee6e8bb 6125 break;
600b828c 6126 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6127 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6128 break;
6129 default:
600b828c
PM
6130 /* Reserved op values were caught by the
6131 * neon_2rm_sizes[] check earlier.
6132 */
6133 abort();
9ee6e8bb 6134 }
600b828c 6135 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6136 tcg_gen_st_f32(cpu_F0s, cpu_env,
6137 neon_reg_offset(rd, pass));
9ee6e8bb 6138 } else {
dd8fbd78 6139 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6140 }
6141 }
6142 break;
6143 }
6144 } else if ((insn & (1 << 10)) == 0) {
6145 /* VTBL, VTBX. */
56907d77
PM
6146 int n = ((insn >> 8) & 3) + 1;
6147 if ((rn + n) > 32) {
6148 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6149 * helper function running off the end of the register file.
6150 */
6151 return 1;
6152 }
6153 n <<= 3;
9ee6e8bb 6154 if (insn & (1 << 6)) {
8f8e3aa4 6155 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6156 } else {
7d1b0095 6157 tmp = tcg_temp_new_i32();
8f8e3aa4 6158 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6159 }
8f8e3aa4 6160 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6161 tmp4 = tcg_const_i32(rn);
6162 tmp5 = tcg_const_i32(n);
9ef39277 6163 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6164 tcg_temp_free_i32(tmp);
9ee6e8bb 6165 if (insn & (1 << 6)) {
8f8e3aa4 6166 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6167 } else {
7d1b0095 6168 tmp = tcg_temp_new_i32();
8f8e3aa4 6169 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6170 }
8f8e3aa4 6171 tmp3 = neon_load_reg(rm, 1);
9ef39277 6172 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6173 tcg_temp_free_i32(tmp5);
6174 tcg_temp_free_i32(tmp4);
8f8e3aa4 6175 neon_store_reg(rd, 0, tmp2);
3018f259 6176 neon_store_reg(rd, 1, tmp3);
7d1b0095 6177 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6178 } else if ((insn & 0x380) == 0) {
6179 /* VDUP */
133da6aa
JR
6180 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6181 return 1;
6182 }
9ee6e8bb 6183 if (insn & (1 << 19)) {
dd8fbd78 6184 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6185 } else {
dd8fbd78 6186 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6187 }
6188 if (insn & (1 << 16)) {
dd8fbd78 6189 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6190 } else if (insn & (1 << 17)) {
6191 if ((insn >> 18) & 1)
dd8fbd78 6192 gen_neon_dup_high16(tmp);
9ee6e8bb 6193 else
dd8fbd78 6194 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6195 }
6196 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6197 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6198 tcg_gen_mov_i32(tmp2, tmp);
6199 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6200 }
7d1b0095 6201 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6202 } else {
6203 return 1;
6204 }
6205 }
6206 }
6207 return 0;
6208}
6209
/* Decode a coprocessor instruction (MRC/MCR/MRRC/MCRR/CDP and friends).
 * Returns 0 if the instruction was handled (code generated), or 1 to
 * signal an illegal/undefined instruction to the caller.
 */
static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    /* On XScale, access to each coprocessor is gated by a bit in CPAR. */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
      case 0:
      case 1:
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    case 10:
    case 11:
	return disas_vfp_insn (env, s, insn);
    default:
        break;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-encoding fields; 64-bit (MRRC/MCRR) accesses
     * use a second transfer register instead of crn/opc2.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(cpu,
                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(env, ri, isread)) {
            return 1;
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            /* Sync the PC and stop translation so the CPU can sleep. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    /* readfn may raise an exception; make the PC correct. */
                    gen_set_pc_im(s->pc);
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value across the rt:rt2 pair. */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                /* Build the 64-bit value from the rt:rt2 pair. */
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_set_pc_im(s->pc);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; UNDEF. */
    return 1;
}
6395
5e3f878a
PB
6396
6397/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6398static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6399{
39d5492a 6400 TCGv_i32 tmp;
7d1b0095 6401 tmp = tcg_temp_new_i32();
5e3f878a
PB
6402 tcg_gen_trunc_i64_i32(tmp, val);
6403 store_reg(s, rlow, tmp);
7d1b0095 6404 tmp = tcg_temp_new_i32();
5e3f878a
PB
6405 tcg_gen_shri_i64(val, val, 32);
6406 tcg_gen_trunc_i64_i32(tmp, val);
6407 store_reg(s, rhigh, tmp);
6408}
6409
6410/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6411static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6412{
a7812ae4 6413 TCGv_i64 tmp;
39d5492a 6414 TCGv_i32 tmp2;
5e3f878a 6415
36aa55dc 6416 /* Load value and extend to 64 bits. */
a7812ae4 6417 tmp = tcg_temp_new_i64();
5e3f878a
PB
6418 tmp2 = load_reg(s, rlow);
6419 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6420 tcg_temp_free_i32(tmp2);
5e3f878a 6421 tcg_gen_add_i64(val, val, tmp);
b75263d6 6422 tcg_temp_free_i64(tmp);
5e3f878a
PB
6423}
6424
6425/* load and add a 64-bit value from a register pair. */
a7812ae4 6426static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6427{
a7812ae4 6428 TCGv_i64 tmp;
39d5492a
PM
6429 TCGv_i32 tmpl;
6430 TCGv_i32 tmph;
5e3f878a
PB
6431
6432 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6433 tmpl = load_reg(s, rlow);
6434 tmph = load_reg(s, rhigh);
a7812ae4 6435 tmp = tcg_temp_new_i64();
36aa55dc 6436 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6437 tcg_temp_free_i32(tmpl);
6438 tcg_temp_free_i32(tmph);
5e3f878a 6439 tcg_gen_add_i64(val, val, tmp);
b75263d6 6440 tcg_temp_free_i64(tmp);
5e3f878a
PB
6441}
6442
c9f10124 6443/* Set N and Z flags from hi|lo. */
39d5492a 6444static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6445{
c9f10124
RH
6446 tcg_gen_mov_i32(cpu_NF, hi);
6447 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6448}
6449
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword
     * (doubleword is performed as two 32-bit accesses).
     */
    switch (size) {
    case 0:
        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Record the loaded value for the later STREX comparison. */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* LDREXD: load the second word from addr + 4 into rt2. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Arm the exclusive monitor for this address. */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
6491
/* CLREX: clear any outstanding exclusive monitor by resetting the
 * recorded exclusive address to an invalid value (-1).
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6496
6497#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    /* User-mode emulation: record the operands of the store-exclusive
     * and raise EXCP_STREX so the operation can be performed atomically
     * outside of generated code.
     */
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    /* Pack size and the register numbers into cpu_exclusive_info. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
6506#else
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail immediately if the monitor is armed for a different address. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    tmp = tcg_temp_new_i32();
    /* Re-load the current memory contents for comparison against the
     * value captured at LDREX time (size encoding as in gen_load_exclusive).
     */
    switch (size) {
    case 0:
        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: also compare the second word against
         * cpu_exclusive_high.
         */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = tcg_temp_new_i32();
        tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* All checks passed: perform the actual store. */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(tmp);
    }
    /* Rd = 0 reports success to the guest; Rd = 1 reports failure. */
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is now disarmed. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6578#endif
6579
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    /* Fetch the banked SP (r13) of the target mode as the base address. */
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset of the first (lower-addressed) word of the two-word frame. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR at the lower address, SPSR at the higher one. */
    tmp = load_reg(s, 14);
    tcg_gen_qemu_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    tcg_gen_qemu_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjust addr (currently base + first-word offset + 4) to the
         * final SP value for the chosen addressing mode, then write it
         * back to the banked r13.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
6645
0ecb72a5 6646static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6647{
6648 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
6649 TCGv_i32 tmp;
6650 TCGv_i32 tmp2;
6651 TCGv_i32 tmp3;
6652 TCGv_i32 addr;
a7812ae4 6653 TCGv_i64 tmp64;
9ee6e8bb 6654
d31dd73e 6655 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6656 s->pc += 4;
6657
6658 /* M variants do not implement ARM mode. */
6659 if (IS_M(env))
6660 goto illegal_op;
6661 cond = insn >> 28;
6662 if (cond == 0xf){
be5e7a76
DES
6663 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6664 * choose to UNDEF. In ARMv5 and above the space is used
6665 * for miscellaneous unconditional instructions.
6666 */
6667 ARCH(5);
6668
9ee6e8bb
PB
6669 /* Unconditional instructions. */
6670 if (((insn >> 25) & 7) == 1) {
6671 /* NEON Data processing. */
6672 if (!arm_feature(env, ARM_FEATURE_NEON))
6673 goto illegal_op;
6674
6675 if (disas_neon_data_insn(env, s, insn))
6676 goto illegal_op;
6677 return;
6678 }
6679 if ((insn & 0x0f100000) == 0x04000000) {
6680 /* NEON load/store. */
6681 if (!arm_feature(env, ARM_FEATURE_NEON))
6682 goto illegal_op;
6683
6684 if (disas_neon_ls_insn(env, s, insn))
6685 goto illegal_op;
6686 return;
6687 }
3d185e5d
PM
6688 if (((insn & 0x0f30f000) == 0x0510f000) ||
6689 ((insn & 0x0f30f010) == 0x0710f000)) {
6690 if ((insn & (1 << 22)) == 0) {
6691 /* PLDW; v7MP */
6692 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6693 goto illegal_op;
6694 }
6695 }
6696 /* Otherwise PLD; v5TE+ */
be5e7a76 6697 ARCH(5TE);
3d185e5d
PM
6698 return;
6699 }
6700 if (((insn & 0x0f70f000) == 0x0450f000) ||
6701 ((insn & 0x0f70f010) == 0x0650f000)) {
6702 ARCH(7);
6703 return; /* PLI; V7 */
6704 }
6705 if (((insn & 0x0f700000) == 0x04100000) ||
6706 ((insn & 0x0f700010) == 0x06100000)) {
6707 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6708 goto illegal_op;
6709 }
6710 return; /* v7MP: Unallocated memory hint: must NOP */
6711 }
6712
6713 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6714 ARCH(6);
6715 /* setend */
10962fd5
PM
6716 if (((insn >> 9) & 1) != s->bswap_code) {
6717 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6718 goto illegal_op;
6719 }
6720 return;
6721 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6722 switch ((insn >> 4) & 0xf) {
6723 case 1: /* clrex */
6724 ARCH(6K);
426f5abc 6725 gen_clrex(s);
9ee6e8bb
PB
6726 return;
6727 case 4: /* dsb */
6728 case 5: /* dmb */
6729 case 6: /* isb */
6730 ARCH(7);
6731 /* We don't emulate caches so these are a no-op. */
6732 return;
6733 default:
6734 goto illegal_op;
6735 }
6736 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6737 /* srs */
81465888 6738 if (IS_USER(s)) {
9ee6e8bb 6739 goto illegal_op;
9ee6e8bb 6740 }
81465888
PM
6741 ARCH(6);
6742 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 6743 return;
ea825eee 6744 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6745 /* rfe */
c67b6b71 6746 int32_t offset;
9ee6e8bb
PB
6747 if (IS_USER(s))
6748 goto illegal_op;
6749 ARCH(6);
6750 rn = (insn >> 16) & 0xf;
b0109805 6751 addr = load_reg(s, rn);
9ee6e8bb
PB
6752 i = (insn >> 23) & 3;
6753 switch (i) {
b0109805 6754 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6755 case 1: offset = 0; break; /* IA */
6756 case 2: offset = -8; break; /* DB */
b0109805 6757 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6758 default: abort();
6759 }
6760 if (offset)
b0109805
PB
6761 tcg_gen_addi_i32(addr, addr, offset);
6762 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d
PM
6763 tmp = tcg_temp_new_i32();
6764 tcg_gen_qemu_ld32u(tmp, addr, 0);
b0109805 6765 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 6766 tmp2 = tcg_temp_new_i32();
5866e078 6767 tcg_gen_qemu_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
6768 if (insn & (1 << 21)) {
6769 /* Base writeback. */
6770 switch (i) {
b0109805 6771 case 0: offset = -8; break;
c67b6b71
FN
6772 case 1: offset = 4; break;
6773 case 2: offset = -4; break;
b0109805 6774 case 3: offset = 0; break;
9ee6e8bb
PB
6775 default: abort();
6776 }
6777 if (offset)
b0109805
PB
6778 tcg_gen_addi_i32(addr, addr, offset);
6779 store_reg(s, rn, addr);
6780 } else {
7d1b0095 6781 tcg_temp_free_i32(addr);
9ee6e8bb 6782 }
b0109805 6783 gen_rfe(s, tmp, tmp2);
c67b6b71 6784 return;
9ee6e8bb
PB
6785 } else if ((insn & 0x0e000000) == 0x0a000000) {
6786 /* branch link and change to thumb (blx <offset>) */
6787 int32_t offset;
6788
6789 val = (uint32_t)s->pc;
7d1b0095 6790 tmp = tcg_temp_new_i32();
d9ba4830
PB
6791 tcg_gen_movi_i32(tmp, val);
6792 store_reg(s, 14, tmp);
9ee6e8bb
PB
6793 /* Sign-extend the 24-bit offset */
6794 offset = (((int32_t)insn) << 8) >> 8;
6795 /* offset * 4 + bit24 * 2 + (thumb bit) */
6796 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6797 /* pipeline offset */
6798 val += 4;
be5e7a76 6799 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6800 gen_bx_im(s, val);
9ee6e8bb
PB
6801 return;
6802 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6803 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6804 /* iWMMXt register transfer. */
6805 if (env->cp15.c15_cpar & (1 << 1))
6806 if (!disas_iwmmxt_insn(env, s, insn))
6807 return;
6808 }
6809 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6810 /* Coprocessor double register transfer. */
be5e7a76 6811 ARCH(5TE);
9ee6e8bb
PB
6812 } else if ((insn & 0x0f000010) == 0x0e000010) {
6813 /* Additional coprocessor register transfer. */
7997d92f 6814 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6815 uint32_t mask;
6816 uint32_t val;
6817 /* cps (privileged) */
6818 if (IS_USER(s))
6819 return;
6820 mask = val = 0;
6821 if (insn & (1 << 19)) {
6822 if (insn & (1 << 8))
6823 mask |= CPSR_A;
6824 if (insn & (1 << 7))
6825 mask |= CPSR_I;
6826 if (insn & (1 << 6))
6827 mask |= CPSR_F;
6828 if (insn & (1 << 18))
6829 val |= mask;
6830 }
7997d92f 6831 if (insn & (1 << 17)) {
9ee6e8bb
PB
6832 mask |= CPSR_M;
6833 val |= (insn & 0x1f);
6834 }
6835 if (mask) {
2fbac54b 6836 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6837 }
6838 return;
6839 }
6840 goto illegal_op;
6841 }
6842 if (cond != 0xe) {
6843 /* if not always execute, we generate a conditional jump to
6844 next instruction */
6845 s->condlabel = gen_new_label();
d9ba4830 6846 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6847 s->condjmp = 1;
6848 }
6849 if ((insn & 0x0f900000) == 0x03000000) {
6850 if ((insn & (1 << 21)) == 0) {
6851 ARCH(6T2);
6852 rd = (insn >> 12) & 0xf;
6853 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6854 if ((insn & (1 << 22)) == 0) {
6855 /* MOVW */
7d1b0095 6856 tmp = tcg_temp_new_i32();
5e3f878a 6857 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6858 } else {
6859 /* MOVT */
5e3f878a 6860 tmp = load_reg(s, rd);
86831435 6861 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6862 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6863 }
5e3f878a 6864 store_reg(s, rd, tmp);
9ee6e8bb
PB
6865 } else {
6866 if (((insn >> 12) & 0xf) != 0xf)
6867 goto illegal_op;
6868 if (((insn >> 16) & 0xf) == 0) {
6869 gen_nop_hint(s, insn & 0xff);
6870 } else {
6871 /* CPSR = immediate */
6872 val = insn & 0xff;
6873 shift = ((insn >> 8) & 0xf) * 2;
6874 if (shift)
6875 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6876 i = ((insn & (1 << 22)) != 0);
2fbac54b 6877 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6878 goto illegal_op;
6879 }
6880 }
6881 } else if ((insn & 0x0f900000) == 0x01000000
6882 && (insn & 0x00000090) != 0x00000090) {
6883 /* miscellaneous instructions */
6884 op1 = (insn >> 21) & 3;
6885 sh = (insn >> 4) & 0xf;
6886 rm = insn & 0xf;
6887 switch (sh) {
6888 case 0x0: /* move program status register */
6889 if (op1 & 1) {
6890 /* PSR = reg */
2fbac54b 6891 tmp = load_reg(s, rm);
9ee6e8bb 6892 i = ((op1 & 2) != 0);
2fbac54b 6893 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6894 goto illegal_op;
6895 } else {
6896 /* reg = PSR */
6897 rd = (insn >> 12) & 0xf;
6898 if (op1 & 2) {
6899 if (IS_USER(s))
6900 goto illegal_op;
d9ba4830 6901 tmp = load_cpu_field(spsr);
9ee6e8bb 6902 } else {
7d1b0095 6903 tmp = tcg_temp_new_i32();
9ef39277 6904 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6905 }
d9ba4830 6906 store_reg(s, rd, tmp);
9ee6e8bb
PB
6907 }
6908 break;
6909 case 0x1:
6910 if (op1 == 1) {
6911 /* branch/exchange thumb (bx). */
be5e7a76 6912 ARCH(4T);
d9ba4830
PB
6913 tmp = load_reg(s, rm);
6914 gen_bx(s, tmp);
9ee6e8bb
PB
6915 } else if (op1 == 3) {
6916 /* clz */
be5e7a76 6917 ARCH(5);
9ee6e8bb 6918 rd = (insn >> 12) & 0xf;
1497c961
PB
6919 tmp = load_reg(s, rm);
6920 gen_helper_clz(tmp, tmp);
6921 store_reg(s, rd, tmp);
9ee6e8bb
PB
6922 } else {
6923 goto illegal_op;
6924 }
6925 break;
6926 case 0x2:
6927 if (op1 == 1) {
6928 ARCH(5J); /* bxj */
6929 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6930 tmp = load_reg(s, rm);
6931 gen_bx(s, tmp);
9ee6e8bb
PB
6932 } else {
6933 goto illegal_op;
6934 }
6935 break;
6936 case 0x3:
6937 if (op1 != 1)
6938 goto illegal_op;
6939
be5e7a76 6940 ARCH(5);
9ee6e8bb 6941 /* branch link/exchange thumb (blx) */
d9ba4830 6942 tmp = load_reg(s, rm);
7d1b0095 6943 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6944 tcg_gen_movi_i32(tmp2, s->pc);
6945 store_reg(s, 14, tmp2);
6946 gen_bx(s, tmp);
9ee6e8bb
PB
6947 break;
6948 case 0x5: /* saturating add/subtract */
be5e7a76 6949 ARCH(5TE);
9ee6e8bb
PB
6950 rd = (insn >> 12) & 0xf;
6951 rn = (insn >> 16) & 0xf;
b40d0353 6952 tmp = load_reg(s, rm);
5e3f878a 6953 tmp2 = load_reg(s, rn);
9ee6e8bb 6954 if (op1 & 2)
9ef39277 6955 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6956 if (op1 & 1)
9ef39277 6957 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6958 else
9ef39277 6959 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6960 tcg_temp_free_i32(tmp2);
5e3f878a 6961 store_reg(s, rd, tmp);
9ee6e8bb 6962 break;
49e14940
AL
6963 case 7:
6964 /* SMC instruction (op1 == 3)
6965 and undefined instructions (op1 == 0 || op1 == 2)
6966 will trap */
6967 if (op1 != 1) {
6968 goto illegal_op;
6969 }
6970 /* bkpt */
be5e7a76 6971 ARCH(5);
bc4a0de0 6972 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6973 break;
6974 case 0x8: /* signed multiply */
6975 case 0xa:
6976 case 0xc:
6977 case 0xe:
be5e7a76 6978 ARCH(5TE);
9ee6e8bb
PB
6979 rs = (insn >> 8) & 0xf;
6980 rn = (insn >> 12) & 0xf;
6981 rd = (insn >> 16) & 0xf;
6982 if (op1 == 1) {
6983 /* (32 * 16) >> 16 */
5e3f878a
PB
6984 tmp = load_reg(s, rm);
6985 tmp2 = load_reg(s, rs);
9ee6e8bb 6986 if (sh & 4)
5e3f878a 6987 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6988 else
5e3f878a 6989 gen_sxth(tmp2);
a7812ae4
PB
6990 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6991 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 6992 tmp = tcg_temp_new_i32();
a7812ae4 6993 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 6994 tcg_temp_free_i64(tmp64);
9ee6e8bb 6995 if ((sh & 2) == 0) {
5e3f878a 6996 tmp2 = load_reg(s, rn);
9ef39277 6997 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 6998 tcg_temp_free_i32(tmp2);
9ee6e8bb 6999 }
5e3f878a 7000 store_reg(s, rd, tmp);
9ee6e8bb
PB
7001 } else {
7002 /* 16 * 16 */
5e3f878a
PB
7003 tmp = load_reg(s, rm);
7004 tmp2 = load_reg(s, rs);
7005 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7006 tcg_temp_free_i32(tmp2);
9ee6e8bb 7007 if (op1 == 2) {
a7812ae4
PB
7008 tmp64 = tcg_temp_new_i64();
7009 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7010 tcg_temp_free_i32(tmp);
a7812ae4
PB
7011 gen_addq(s, tmp64, rn, rd);
7012 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7013 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7014 } else {
7015 if (op1 == 0) {
5e3f878a 7016 tmp2 = load_reg(s, rn);
9ef39277 7017 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7018 tcg_temp_free_i32(tmp2);
9ee6e8bb 7019 }
5e3f878a 7020 store_reg(s, rd, tmp);
9ee6e8bb
PB
7021 }
7022 }
7023 break;
7024 default:
7025 goto illegal_op;
7026 }
7027 } else if (((insn & 0x0e000000) == 0 &&
7028 (insn & 0x00000090) != 0x90) ||
7029 ((insn & 0x0e000000) == (1 << 25))) {
7030 int set_cc, logic_cc, shiftop;
7031
7032 op1 = (insn >> 21) & 0xf;
7033 set_cc = (insn >> 20) & 1;
7034 logic_cc = table_logic_cc[op1] & set_cc;
7035
7036 /* data processing instruction */
7037 if (insn & (1 << 25)) {
7038 /* immediate operand */
7039 val = insn & 0xff;
7040 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7041 if (shift) {
9ee6e8bb 7042 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7043 }
7d1b0095 7044 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7045 tcg_gen_movi_i32(tmp2, val);
7046 if (logic_cc && shift) {
7047 gen_set_CF_bit31(tmp2);
7048 }
9ee6e8bb
PB
7049 } else {
7050 /* register */
7051 rm = (insn) & 0xf;
e9bb4aa9 7052 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7053 shiftop = (insn >> 5) & 3;
7054 if (!(insn & (1 << 4))) {
7055 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7056 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7057 } else {
7058 rs = (insn >> 8) & 0xf;
8984bd2e 7059 tmp = load_reg(s, rs);
e9bb4aa9 7060 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7061 }
7062 }
7063 if (op1 != 0x0f && op1 != 0x0d) {
7064 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7065 tmp = load_reg(s, rn);
7066 } else {
39d5492a 7067 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7068 }
7069 rd = (insn >> 12) & 0xf;
7070 switch(op1) {
7071 case 0x00:
e9bb4aa9
JR
7072 tcg_gen_and_i32(tmp, tmp, tmp2);
7073 if (logic_cc) {
7074 gen_logic_CC(tmp);
7075 }
21aeb343 7076 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7077 break;
7078 case 0x01:
e9bb4aa9
JR
7079 tcg_gen_xor_i32(tmp, tmp, tmp2);
7080 if (logic_cc) {
7081 gen_logic_CC(tmp);
7082 }
21aeb343 7083 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7084 break;
7085 case 0x02:
7086 if (set_cc && rd == 15) {
7087 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7088 if (IS_USER(s)) {
9ee6e8bb 7089 goto illegal_op;
e9bb4aa9 7090 }
72485ec4 7091 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7092 gen_exception_return(s, tmp);
9ee6e8bb 7093 } else {
e9bb4aa9 7094 if (set_cc) {
72485ec4 7095 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7096 } else {
7097 tcg_gen_sub_i32(tmp, tmp, tmp2);
7098 }
21aeb343 7099 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7100 }
7101 break;
7102 case 0x03:
e9bb4aa9 7103 if (set_cc) {
72485ec4 7104 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7105 } else {
7106 tcg_gen_sub_i32(tmp, tmp2, tmp);
7107 }
21aeb343 7108 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7109 break;
7110 case 0x04:
e9bb4aa9 7111 if (set_cc) {
72485ec4 7112 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7113 } else {
7114 tcg_gen_add_i32(tmp, tmp, tmp2);
7115 }
21aeb343 7116 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7117 break;
7118 case 0x05:
e9bb4aa9 7119 if (set_cc) {
49b4c31e 7120 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7121 } else {
7122 gen_add_carry(tmp, tmp, tmp2);
7123 }
21aeb343 7124 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7125 break;
7126 case 0x06:
e9bb4aa9 7127 if (set_cc) {
2de68a49 7128 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7129 } else {
7130 gen_sub_carry(tmp, tmp, tmp2);
7131 }
21aeb343 7132 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7133 break;
7134 case 0x07:
e9bb4aa9 7135 if (set_cc) {
2de68a49 7136 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7137 } else {
7138 gen_sub_carry(tmp, tmp2, tmp);
7139 }
21aeb343 7140 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7141 break;
7142 case 0x08:
7143 if (set_cc) {
e9bb4aa9
JR
7144 tcg_gen_and_i32(tmp, tmp, tmp2);
7145 gen_logic_CC(tmp);
9ee6e8bb 7146 }
7d1b0095 7147 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7148 break;
7149 case 0x09:
7150 if (set_cc) {
e9bb4aa9
JR
7151 tcg_gen_xor_i32(tmp, tmp, tmp2);
7152 gen_logic_CC(tmp);
9ee6e8bb 7153 }
7d1b0095 7154 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7155 break;
7156 case 0x0a:
7157 if (set_cc) {
72485ec4 7158 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7159 }
7d1b0095 7160 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7161 break;
7162 case 0x0b:
7163 if (set_cc) {
72485ec4 7164 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7165 }
7d1b0095 7166 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7167 break;
7168 case 0x0c:
e9bb4aa9
JR
7169 tcg_gen_or_i32(tmp, tmp, tmp2);
7170 if (logic_cc) {
7171 gen_logic_CC(tmp);
7172 }
21aeb343 7173 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7174 break;
7175 case 0x0d:
7176 if (logic_cc && rd == 15) {
7177 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7178 if (IS_USER(s)) {
9ee6e8bb 7179 goto illegal_op;
e9bb4aa9
JR
7180 }
7181 gen_exception_return(s, tmp2);
9ee6e8bb 7182 } else {
e9bb4aa9
JR
7183 if (logic_cc) {
7184 gen_logic_CC(tmp2);
7185 }
21aeb343 7186 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7187 }
7188 break;
7189 case 0x0e:
f669df27 7190 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7191 if (logic_cc) {
7192 gen_logic_CC(tmp);
7193 }
21aeb343 7194 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7195 break;
7196 default:
7197 case 0x0f:
e9bb4aa9
JR
7198 tcg_gen_not_i32(tmp2, tmp2);
7199 if (logic_cc) {
7200 gen_logic_CC(tmp2);
7201 }
21aeb343 7202 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7203 break;
7204 }
e9bb4aa9 7205 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7206 tcg_temp_free_i32(tmp2);
e9bb4aa9 7207 }
9ee6e8bb
PB
7208 } else {
7209 /* other instructions */
7210 op1 = (insn >> 24) & 0xf;
7211 switch(op1) {
7212 case 0x0:
7213 case 0x1:
7214 /* multiplies, extra load/stores */
7215 sh = (insn >> 5) & 3;
7216 if (sh == 0) {
7217 if (op1 == 0x0) {
7218 rd = (insn >> 16) & 0xf;
7219 rn = (insn >> 12) & 0xf;
7220 rs = (insn >> 8) & 0xf;
7221 rm = (insn) & 0xf;
7222 op1 = (insn >> 20) & 0xf;
7223 switch (op1) {
7224 case 0: case 1: case 2: case 3: case 6:
7225 /* 32 bit mul */
5e3f878a
PB
7226 tmp = load_reg(s, rs);
7227 tmp2 = load_reg(s, rm);
7228 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7229 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7230 if (insn & (1 << 22)) {
7231 /* Subtract (mls) */
7232 ARCH(6T2);
5e3f878a
PB
7233 tmp2 = load_reg(s, rn);
7234 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7235 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7236 } else if (insn & (1 << 21)) {
7237 /* Add */
5e3f878a
PB
7238 tmp2 = load_reg(s, rn);
7239 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7240 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7241 }
7242 if (insn & (1 << 20))
5e3f878a
PB
7243 gen_logic_CC(tmp);
7244 store_reg(s, rd, tmp);
9ee6e8bb 7245 break;
8aac08b1
AJ
7246 case 4:
7247 /* 64 bit mul double accumulate (UMAAL) */
7248 ARCH(6);
7249 tmp = load_reg(s, rs);
7250 tmp2 = load_reg(s, rm);
7251 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7252 gen_addq_lo(s, tmp64, rn);
7253 gen_addq_lo(s, tmp64, rd);
7254 gen_storeq_reg(s, rn, rd, tmp64);
7255 tcg_temp_free_i64(tmp64);
7256 break;
7257 case 8: case 9: case 10: case 11:
7258 case 12: case 13: case 14: case 15:
7259 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7260 tmp = load_reg(s, rs);
7261 tmp2 = load_reg(s, rm);
8aac08b1 7262 if (insn & (1 << 22)) {
c9f10124 7263 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7264 } else {
c9f10124 7265 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7266 }
7267 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7268 TCGv_i32 al = load_reg(s, rn);
7269 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7270 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7271 tcg_temp_free_i32(al);
7272 tcg_temp_free_i32(ah);
9ee6e8bb 7273 }
8aac08b1 7274 if (insn & (1 << 20)) {
c9f10124 7275 gen_logicq_cc(tmp, tmp2);
8aac08b1 7276 }
c9f10124
RH
7277 store_reg(s, rn, tmp);
7278 store_reg(s, rd, tmp2);
9ee6e8bb 7279 break;
8aac08b1
AJ
7280 default:
7281 goto illegal_op;
9ee6e8bb
PB
7282 }
7283 } else {
7284 rn = (insn >> 16) & 0xf;
7285 rd = (insn >> 12) & 0xf;
7286 if (insn & (1 << 23)) {
7287 /* load/store exclusive */
2359bf80 7288 int op2 = (insn >> 8) & 3;
86753403 7289 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7290
7291 switch (op2) {
7292 case 0: /* lda/stl */
7293 if (op1 == 1) {
7294 goto illegal_op;
7295 }
7296 ARCH(8);
7297 break;
7298 case 1: /* reserved */
7299 goto illegal_op;
7300 case 2: /* ldaex/stlex */
7301 ARCH(8);
7302 break;
7303 case 3: /* ldrex/strex */
7304 if (op1) {
7305 ARCH(6K);
7306 } else {
7307 ARCH(6);
7308 }
7309 break;
7310 }
7311
3174f8e9 7312 addr = tcg_temp_local_new_i32();
98a46317 7313 load_reg_var(s, addr, rn);
2359bf80
MR
7314
7315 /* Since the emulation does not have barriers,
7316 the acquire/release semantics need no special
7317 handling */
7318 if (op2 == 0) {
7319 if (insn & (1 << 20)) {
7320 tmp = tcg_temp_new_i32();
7321 switch (op1) {
7322 case 0: /* lda */
7323 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7324 break;
7325 case 2: /* ldab */
7326 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
7327 break;
7328 case 3: /* ldah */
7329 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
7330 break;
7331 default:
7332 abort();
7333 }
7334 store_reg(s, rd, tmp);
7335 } else {
7336 rm = insn & 0xf;
7337 tmp = load_reg(s, rm);
7338 switch (op1) {
7339 case 0: /* stl */
7340 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7341 break;
7342 case 2: /* stlb */
7343 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
7344 break;
7345 case 3: /* stlh */
7346 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
7347 break;
7348 default:
7349 abort();
7350 }
7351 tcg_temp_free_i32(tmp);
7352 }
7353 } else if (insn & (1 << 20)) {
86753403
PB
7354 switch (op1) {
7355 case 0: /* ldrex */
426f5abc 7356 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7357 break;
7358 case 1: /* ldrexd */
426f5abc 7359 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7360 break;
7361 case 2: /* ldrexb */
426f5abc 7362 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7363 break;
7364 case 3: /* ldrexh */
426f5abc 7365 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7366 break;
7367 default:
7368 abort();
7369 }
9ee6e8bb
PB
7370 } else {
7371 rm = insn & 0xf;
86753403
PB
7372 switch (op1) {
7373 case 0: /* strex */
426f5abc 7374 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7375 break;
7376 case 1: /* strexd */
502e64fe 7377 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7378 break;
7379 case 2: /* strexb */
426f5abc 7380 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7381 break;
7382 case 3: /* strexh */
426f5abc 7383 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7384 break;
7385 default:
7386 abort();
7387 }
9ee6e8bb 7388 }
39d5492a 7389 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7390 } else {
7391 /* SWP instruction */
7392 rm = (insn) & 0xf;
7393
8984bd2e
PB
7394 /* ??? This is not really atomic. However we know
7395 we never have multiple CPUs running in parallel,
7396 so it is good enough. */
7397 addr = load_reg(s, rn);
7398 tmp = load_reg(s, rm);
5a839c0d 7399 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7400 if (insn & (1 << 22)) {
5a839c0d
PM
7401 tcg_gen_qemu_ld8u(tmp2, addr, IS_USER(s));
7402 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7403 } else {
5a839c0d
PM
7404 tcg_gen_qemu_ld32u(tmp2, addr, IS_USER(s));
7405 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7406 }
5a839c0d 7407 tcg_temp_free_i32(tmp);
7d1b0095 7408 tcg_temp_free_i32(addr);
8984bd2e 7409 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7410 }
7411 }
7412 } else {
7413 int address_offset;
7414 int load;
7415 /* Misc load/store */
7416 rn = (insn >> 16) & 0xf;
7417 rd = (insn >> 12) & 0xf;
b0109805 7418 addr = load_reg(s, rn);
9ee6e8bb 7419 if (insn & (1 << 24))
b0109805 7420 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7421 address_offset = 0;
7422 if (insn & (1 << 20)) {
7423 /* load */
5a839c0d 7424 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7425 switch(sh) {
7426 case 1:
5a839c0d 7427 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7428 break;
7429 case 2:
5a839c0d 7430 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7431 break;
7432 default:
7433 case 3:
5a839c0d 7434 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7435 break;
7436 }
7437 load = 1;
7438 } else if (sh & 2) {
be5e7a76 7439 ARCH(5TE);
9ee6e8bb
PB
7440 /* doubleword */
7441 if (sh & 1) {
7442 /* store */
b0109805 7443 tmp = load_reg(s, rd);
5a839c0d
PM
7444 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7445 tcg_temp_free_i32(tmp);
b0109805
PB
7446 tcg_gen_addi_i32(addr, addr, 4);
7447 tmp = load_reg(s, rd + 1);
5a839c0d
PM
7448 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7449 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7450 load = 0;
7451 } else {
7452 /* load */
5a839c0d
PM
7453 tmp = tcg_temp_new_i32();
7454 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7455 store_reg(s, rd, tmp);
7456 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d
PM
7457 tmp = tcg_temp_new_i32();
7458 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7459 rd++;
7460 load = 1;
7461 }
7462 address_offset = -4;
7463 } else {
7464 /* store */
b0109805 7465 tmp = load_reg(s, rd);
5a839c0d
PM
7466 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
7467 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7468 load = 0;
7469 }
7470 /* Perform base writeback before the loaded value to
7471 ensure correct behavior with overlapping index registers.
7472 ldrd with base writeback is is undefined if the
7473 destination and index registers overlap. */
7474 if (!(insn & (1 << 24))) {
b0109805
PB
7475 gen_add_datah_offset(s, insn, address_offset, addr);
7476 store_reg(s, rn, addr);
9ee6e8bb
PB
7477 } else if (insn & (1 << 21)) {
7478 if (address_offset)
b0109805
PB
7479 tcg_gen_addi_i32(addr, addr, address_offset);
7480 store_reg(s, rn, addr);
7481 } else {
7d1b0095 7482 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7483 }
7484 if (load) {
7485 /* Complete the load. */
b0109805 7486 store_reg(s, rd, tmp);
9ee6e8bb
PB
7487 }
7488 }
7489 break;
7490 case 0x4:
7491 case 0x5:
7492 goto do_ldst;
7493 case 0x6:
7494 case 0x7:
7495 if (insn & (1 << 4)) {
7496 ARCH(6);
7497 /* Armv6 Media instructions. */
7498 rm = insn & 0xf;
7499 rn = (insn >> 16) & 0xf;
2c0262af 7500 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7501 rs = (insn >> 8) & 0xf;
7502 switch ((insn >> 23) & 3) {
7503 case 0: /* Parallel add/subtract. */
7504 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7505 tmp = load_reg(s, rn);
7506 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7507 sh = (insn >> 5) & 7;
7508 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7509 goto illegal_op;
6ddbc6e4 7510 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7511 tcg_temp_free_i32(tmp2);
6ddbc6e4 7512 store_reg(s, rd, tmp);
9ee6e8bb
PB
7513 break;
7514 case 1:
7515 if ((insn & 0x00700020) == 0) {
6c95676b 7516 /* Halfword pack. */
3670669c
PB
7517 tmp = load_reg(s, rn);
7518 tmp2 = load_reg(s, rm);
9ee6e8bb 7519 shift = (insn >> 7) & 0x1f;
3670669c
PB
7520 if (insn & (1 << 6)) {
7521 /* pkhtb */
22478e79
AZ
7522 if (shift == 0)
7523 shift = 31;
7524 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7525 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7526 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7527 } else {
7528 /* pkhbt */
22478e79
AZ
7529 if (shift)
7530 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7531 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7532 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7533 }
7534 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7535 tcg_temp_free_i32(tmp2);
3670669c 7536 store_reg(s, rd, tmp);
9ee6e8bb
PB
7537 } else if ((insn & 0x00200020) == 0x00200000) {
7538 /* [us]sat */
6ddbc6e4 7539 tmp = load_reg(s, rm);
9ee6e8bb
PB
7540 shift = (insn >> 7) & 0x1f;
7541 if (insn & (1 << 6)) {
7542 if (shift == 0)
7543 shift = 31;
6ddbc6e4 7544 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7545 } else {
6ddbc6e4 7546 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7547 }
7548 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7549 tmp2 = tcg_const_i32(sh);
7550 if (insn & (1 << 22))
9ef39277 7551 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7552 else
9ef39277 7553 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7554 tcg_temp_free_i32(tmp2);
6ddbc6e4 7555 store_reg(s, rd, tmp);
9ee6e8bb
PB
7556 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7557 /* [us]sat16 */
6ddbc6e4 7558 tmp = load_reg(s, rm);
9ee6e8bb 7559 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7560 tmp2 = tcg_const_i32(sh);
7561 if (insn & (1 << 22))
9ef39277 7562 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7563 else
9ef39277 7564 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7565 tcg_temp_free_i32(tmp2);
6ddbc6e4 7566 store_reg(s, rd, tmp);
9ee6e8bb
PB
7567 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7568 /* Select bytes. */
6ddbc6e4
PB
7569 tmp = load_reg(s, rn);
7570 tmp2 = load_reg(s, rm);
7d1b0095 7571 tmp3 = tcg_temp_new_i32();
0ecb72a5 7572 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7573 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7574 tcg_temp_free_i32(tmp3);
7575 tcg_temp_free_i32(tmp2);
6ddbc6e4 7576 store_reg(s, rd, tmp);
9ee6e8bb 7577 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7578 tmp = load_reg(s, rm);
9ee6e8bb 7579 shift = (insn >> 10) & 3;
1301f322 7580 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7581 rotate, a shift is sufficient. */
7582 if (shift != 0)
f669df27 7583 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7584 op1 = (insn >> 20) & 7;
7585 switch (op1) {
5e3f878a
PB
7586 case 0: gen_sxtb16(tmp); break;
7587 case 2: gen_sxtb(tmp); break;
7588 case 3: gen_sxth(tmp); break;
7589 case 4: gen_uxtb16(tmp); break;
7590 case 6: gen_uxtb(tmp); break;
7591 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7592 default: goto illegal_op;
7593 }
7594 if (rn != 15) {
5e3f878a 7595 tmp2 = load_reg(s, rn);
9ee6e8bb 7596 if ((op1 & 3) == 0) {
5e3f878a 7597 gen_add16(tmp, tmp2);
9ee6e8bb 7598 } else {
5e3f878a 7599 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7600 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7601 }
7602 }
6c95676b 7603 store_reg(s, rd, tmp);
9ee6e8bb
PB
7604 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7605 /* rev */
b0109805 7606 tmp = load_reg(s, rm);
9ee6e8bb
PB
7607 if (insn & (1 << 22)) {
7608 if (insn & (1 << 7)) {
b0109805 7609 gen_revsh(tmp);
9ee6e8bb
PB
7610 } else {
7611 ARCH(6T2);
b0109805 7612 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7613 }
7614 } else {
7615 if (insn & (1 << 7))
b0109805 7616 gen_rev16(tmp);
9ee6e8bb 7617 else
66896cb8 7618 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7619 }
b0109805 7620 store_reg(s, rd, tmp);
9ee6e8bb
PB
7621 } else {
7622 goto illegal_op;
7623 }
7624 break;
7625 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7626 switch ((insn >> 20) & 0x7) {
7627 case 5:
7628 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7629 /* op2 not 00x or 11x : UNDEF */
7630 goto illegal_op;
7631 }
838fa72d
AJ
7632 /* Signed multiply most significant [accumulate].
7633 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7634 tmp = load_reg(s, rm);
7635 tmp2 = load_reg(s, rs);
a7812ae4 7636 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7637
955a7dd5 7638 if (rd != 15) {
838fa72d 7639 tmp = load_reg(s, rd);
9ee6e8bb 7640 if (insn & (1 << 6)) {
838fa72d 7641 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7642 } else {
838fa72d 7643 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7644 }
7645 }
838fa72d
AJ
7646 if (insn & (1 << 5)) {
7647 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7648 }
7649 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7650 tmp = tcg_temp_new_i32();
838fa72d
AJ
7651 tcg_gen_trunc_i64_i32(tmp, tmp64);
7652 tcg_temp_free_i64(tmp64);
955a7dd5 7653 store_reg(s, rn, tmp);
41e9564d
PM
7654 break;
7655 case 0:
7656 case 4:
7657 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7658 if (insn & (1 << 7)) {
7659 goto illegal_op;
7660 }
7661 tmp = load_reg(s, rm);
7662 tmp2 = load_reg(s, rs);
9ee6e8bb 7663 if (insn & (1 << 5))
5e3f878a
PB
7664 gen_swap_half(tmp2);
7665 gen_smul_dual(tmp, tmp2);
5e3f878a 7666 if (insn & (1 << 6)) {
e1d177b9 7667 /* This subtraction cannot overflow. */
5e3f878a
PB
7668 tcg_gen_sub_i32(tmp, tmp, tmp2);
7669 } else {
e1d177b9
PM
7670 /* This addition cannot overflow 32 bits;
7671 * however it may overflow considered as a signed
7672 * operation, in which case we must set the Q flag.
7673 */
9ef39277 7674 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7675 }
7d1b0095 7676 tcg_temp_free_i32(tmp2);
9ee6e8bb 7677 if (insn & (1 << 22)) {
5e3f878a 7678 /* smlald, smlsld */
a7812ae4
PB
7679 tmp64 = tcg_temp_new_i64();
7680 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7681 tcg_temp_free_i32(tmp);
a7812ae4
PB
7682 gen_addq(s, tmp64, rd, rn);
7683 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7684 tcg_temp_free_i64(tmp64);
9ee6e8bb 7685 } else {
5e3f878a 7686 /* smuad, smusd, smlad, smlsd */
22478e79 7687 if (rd != 15)
9ee6e8bb 7688 {
22478e79 7689 tmp2 = load_reg(s, rd);
9ef39277 7690 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7691 tcg_temp_free_i32(tmp2);
9ee6e8bb 7692 }
22478e79 7693 store_reg(s, rn, tmp);
9ee6e8bb 7694 }
41e9564d 7695 break;
b8b8ea05
PM
7696 case 1:
7697 case 3:
7698 /* SDIV, UDIV */
7699 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7700 goto illegal_op;
7701 }
7702 if (((insn >> 5) & 7) || (rd != 15)) {
7703 goto illegal_op;
7704 }
7705 tmp = load_reg(s, rm);
7706 tmp2 = load_reg(s, rs);
7707 if (insn & (1 << 21)) {
7708 gen_helper_udiv(tmp, tmp, tmp2);
7709 } else {
7710 gen_helper_sdiv(tmp, tmp, tmp2);
7711 }
7712 tcg_temp_free_i32(tmp2);
7713 store_reg(s, rn, tmp);
7714 break;
41e9564d
PM
7715 default:
7716 goto illegal_op;
9ee6e8bb
PB
7717 }
7718 break;
7719 case 3:
7720 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7721 switch (op1) {
7722 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7723 ARCH(6);
7724 tmp = load_reg(s, rm);
7725 tmp2 = load_reg(s, rs);
7726 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7727 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7728 if (rd != 15) {
7729 tmp2 = load_reg(s, rd);
6ddbc6e4 7730 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7731 tcg_temp_free_i32(tmp2);
9ee6e8bb 7732 }
ded9d295 7733 store_reg(s, rn, tmp);
9ee6e8bb
PB
7734 break;
7735 case 0x20: case 0x24: case 0x28: case 0x2c:
7736 /* Bitfield insert/clear. */
7737 ARCH(6T2);
7738 shift = (insn >> 7) & 0x1f;
7739 i = (insn >> 16) & 0x1f;
7740 i = i + 1 - shift;
7741 if (rm == 15) {
7d1b0095 7742 tmp = tcg_temp_new_i32();
5e3f878a 7743 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7744 } else {
5e3f878a 7745 tmp = load_reg(s, rm);
9ee6e8bb
PB
7746 }
7747 if (i != 32) {
5e3f878a 7748 tmp2 = load_reg(s, rd);
d593c48e 7749 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7750 tcg_temp_free_i32(tmp2);
9ee6e8bb 7751 }
5e3f878a 7752 store_reg(s, rd, tmp);
9ee6e8bb
PB
7753 break;
7754 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7755 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7756 ARCH(6T2);
5e3f878a 7757 tmp = load_reg(s, rm);
9ee6e8bb
PB
7758 shift = (insn >> 7) & 0x1f;
7759 i = ((insn >> 16) & 0x1f) + 1;
7760 if (shift + i > 32)
7761 goto illegal_op;
7762 if (i < 32) {
7763 if (op1 & 0x20) {
5e3f878a 7764 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7765 } else {
5e3f878a 7766 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7767 }
7768 }
5e3f878a 7769 store_reg(s, rd, tmp);
9ee6e8bb
PB
7770 break;
7771 default:
7772 goto illegal_op;
7773 }
7774 break;
7775 }
7776 break;
7777 }
7778 do_ldst:
7779 /* Check for undefined extension instructions
7780 * per the ARM Bible IE:
7781 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7782 */
7783 sh = (0xf << 20) | (0xf << 4);
7784 if (op1 == 0x7 && ((insn & sh) == sh))
7785 {
7786 goto illegal_op;
7787 }
7788 /* load/store byte/word */
7789 rn = (insn >> 16) & 0xf;
7790 rd = (insn >> 12) & 0xf;
b0109805 7791 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7792 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7793 if (insn & (1 << 24))
b0109805 7794 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7795 if (insn & (1 << 20)) {
7796 /* load */
5a839c0d 7797 tmp = tcg_temp_new_i32();
9ee6e8bb 7798 if (insn & (1 << 22)) {
5a839c0d 7799 tcg_gen_qemu_ld8u(tmp, tmp2, i);
9ee6e8bb 7800 } else {
5a839c0d 7801 tcg_gen_qemu_ld32u(tmp, tmp2, i);
9ee6e8bb 7802 }
9ee6e8bb
PB
7803 } else {
7804 /* store */
b0109805 7805 tmp = load_reg(s, rd);
5a839c0d
PM
7806 if (insn & (1 << 22)) {
7807 tcg_gen_qemu_st8(tmp, tmp2, i);
7808 } else {
7809 tcg_gen_qemu_st32(tmp, tmp2, i);
7810 }
7811 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7812 }
7813 if (!(insn & (1 << 24))) {
b0109805
PB
7814 gen_add_data_offset(s, insn, tmp2);
7815 store_reg(s, rn, tmp2);
7816 } else if (insn & (1 << 21)) {
7817 store_reg(s, rn, tmp2);
7818 } else {
7d1b0095 7819 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7820 }
7821 if (insn & (1 << 20)) {
7822 /* Complete the load. */
be5e7a76 7823 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7824 }
7825 break;
7826 case 0x08:
7827 case 0x09:
7828 {
7829 int j, n, user, loaded_base;
39d5492a 7830 TCGv_i32 loaded_var;
9ee6e8bb
PB
7831 /* load/store multiple words */
7832 /* XXX: store correct base if write back */
7833 user = 0;
7834 if (insn & (1 << 22)) {
7835 if (IS_USER(s))
7836 goto illegal_op; /* only usable in supervisor mode */
7837
7838 if ((insn & (1 << 15)) == 0)
7839 user = 1;
7840 }
7841 rn = (insn >> 16) & 0xf;
b0109805 7842 addr = load_reg(s, rn);
9ee6e8bb
PB
7843
7844 /* compute total size */
7845 loaded_base = 0;
39d5492a 7846 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
7847 n = 0;
7848 for(i=0;i<16;i++) {
7849 if (insn & (1 << i))
7850 n++;
7851 }
7852 /* XXX: test invalid n == 0 case ? */
7853 if (insn & (1 << 23)) {
7854 if (insn & (1 << 24)) {
7855 /* pre increment */
b0109805 7856 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7857 } else {
7858 /* post increment */
7859 }
7860 } else {
7861 if (insn & (1 << 24)) {
7862 /* pre decrement */
b0109805 7863 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7864 } else {
7865 /* post decrement */
7866 if (n != 1)
b0109805 7867 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7868 }
7869 }
7870 j = 0;
7871 for(i=0;i<16;i++) {
7872 if (insn & (1 << i)) {
7873 if (insn & (1 << 20)) {
7874 /* load */
5a839c0d
PM
7875 tmp = tcg_temp_new_i32();
7876 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
be5e7a76 7877 if (user) {
b75263d6 7878 tmp2 = tcg_const_i32(i);
1ce94f81 7879 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7880 tcg_temp_free_i32(tmp2);
7d1b0095 7881 tcg_temp_free_i32(tmp);
9ee6e8bb 7882 } else if (i == rn) {
b0109805 7883 loaded_var = tmp;
9ee6e8bb
PB
7884 loaded_base = 1;
7885 } else {
be5e7a76 7886 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7887 }
7888 } else {
7889 /* store */
7890 if (i == 15) {
7891 /* special case: r15 = PC + 8 */
7892 val = (long)s->pc + 4;
7d1b0095 7893 tmp = tcg_temp_new_i32();
b0109805 7894 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7895 } else if (user) {
7d1b0095 7896 tmp = tcg_temp_new_i32();
b75263d6 7897 tmp2 = tcg_const_i32(i);
9ef39277 7898 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7899 tcg_temp_free_i32(tmp2);
9ee6e8bb 7900 } else {
b0109805 7901 tmp = load_reg(s, i);
9ee6e8bb 7902 }
5a839c0d
PM
7903 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7904 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7905 }
7906 j++;
7907 /* no need to add after the last transfer */
7908 if (j != n)
b0109805 7909 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7910 }
7911 }
7912 if (insn & (1 << 21)) {
7913 /* write back */
7914 if (insn & (1 << 23)) {
7915 if (insn & (1 << 24)) {
7916 /* pre increment */
7917 } else {
7918 /* post increment */
b0109805 7919 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7920 }
7921 } else {
7922 if (insn & (1 << 24)) {
7923 /* pre decrement */
7924 if (n != 1)
b0109805 7925 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7926 } else {
7927 /* post decrement */
b0109805 7928 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7929 }
7930 }
b0109805
PB
7931 store_reg(s, rn, addr);
7932 } else {
7d1b0095 7933 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7934 }
7935 if (loaded_base) {
b0109805 7936 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7937 }
7938 if ((insn & (1 << 22)) && !user) {
7939 /* Restore CPSR from SPSR. */
d9ba4830
PB
7940 tmp = load_cpu_field(spsr);
7941 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7942 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7943 s->is_jmp = DISAS_UPDATE;
7944 }
7945 }
7946 break;
7947 case 0xa:
7948 case 0xb:
7949 {
7950 int32_t offset;
7951
7952 /* branch (and link) */
7953 val = (int32_t)s->pc;
7954 if (insn & (1 << 24)) {
7d1b0095 7955 tmp = tcg_temp_new_i32();
5e3f878a
PB
7956 tcg_gen_movi_i32(tmp, val);
7957 store_reg(s, 14, tmp);
9ee6e8bb
PB
7958 }
7959 offset = (((int32_t)insn << 8) >> 8);
7960 val += (offset << 2) + 4;
7961 gen_jmp(s, val);
7962 }
7963 break;
7964 case 0xc:
7965 case 0xd:
7966 case 0xe:
7967 /* Coprocessor. */
7968 if (disas_coproc_insn(env, s, insn))
7969 goto illegal_op;
7970 break;
7971 case 0xf:
7972 /* swi */
5e3f878a 7973 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7974 s->is_jmp = DISAS_SWI;
7975 break;
7976 default:
7977 illegal_op:
bc4a0de0 7978 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7979 break;
7980 }
7981 }
7982}
7983
7984/* Return true if this is a Thumb-2 logical op. */
7985static int
7986thumb2_logic_op(int op)
7987{
7988 return (op < 8);
7989}
7990
7991/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7992 then set condition code flags based on the result of the operation.
7993 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7994 to the high bit of T1.
7995 Returns zero if the opcode is valid. */
7996
7997static int
39d5492a
PM
7998gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
7999 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8000{
8001 int logic_cc;
8002
8003 logic_cc = 0;
8004 switch (op) {
8005 case 0: /* and */
396e467c 8006 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8007 logic_cc = conds;
8008 break;
8009 case 1: /* bic */
f669df27 8010 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8011 logic_cc = conds;
8012 break;
8013 case 2: /* orr */
396e467c 8014 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8015 logic_cc = conds;
8016 break;
8017 case 3: /* orn */
29501f1b 8018 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8019 logic_cc = conds;
8020 break;
8021 case 4: /* eor */
396e467c 8022 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8023 logic_cc = conds;
8024 break;
8025 case 8: /* add */
8026 if (conds)
72485ec4 8027 gen_add_CC(t0, t0, t1);
9ee6e8bb 8028 else
396e467c 8029 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8030 break;
8031 case 10: /* adc */
8032 if (conds)
49b4c31e 8033 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8034 else
396e467c 8035 gen_adc(t0, t1);
9ee6e8bb
PB
8036 break;
8037 case 11: /* sbc */
2de68a49
RH
8038 if (conds) {
8039 gen_sbc_CC(t0, t0, t1);
8040 } else {
396e467c 8041 gen_sub_carry(t0, t0, t1);
2de68a49 8042 }
9ee6e8bb
PB
8043 break;
8044 case 13: /* sub */
8045 if (conds)
72485ec4 8046 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8047 else
396e467c 8048 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8049 break;
8050 case 14: /* rsb */
8051 if (conds)
72485ec4 8052 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8053 else
396e467c 8054 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8055 break;
8056 default: /* 5, 6, 7, 9, 12, 15. */
8057 return 1;
8058 }
8059 if (logic_cc) {
396e467c 8060 gen_logic_CC(t0);
9ee6e8bb 8061 if (shifter_out)
396e467c 8062 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8063 }
8064 return 0;
8065}
8066
8067/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8068 is not legal. */
0ecb72a5 8069static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8070{
b0109805 8071 uint32_t insn, imm, shift, offset;
9ee6e8bb 8072 uint32_t rd, rn, rm, rs;
39d5492a
PM
8073 TCGv_i32 tmp;
8074 TCGv_i32 tmp2;
8075 TCGv_i32 tmp3;
8076 TCGv_i32 addr;
a7812ae4 8077 TCGv_i64 tmp64;
9ee6e8bb
PB
8078 int op;
8079 int shiftop;
8080 int conds;
8081 int logic_cc;
8082
8083 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8084 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8085 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8086 16-bit instructions to get correct prefetch abort behavior. */
8087 insn = insn_hw1;
8088 if ((insn & (1 << 12)) == 0) {
be5e7a76 8089 ARCH(5);
9ee6e8bb
PB
8090 /* Second half of blx. */
8091 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8092 tmp = load_reg(s, 14);
8093 tcg_gen_addi_i32(tmp, tmp, offset);
8094 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8095
7d1b0095 8096 tmp2 = tcg_temp_new_i32();
b0109805 8097 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8098 store_reg(s, 14, tmp2);
8099 gen_bx(s, tmp);
9ee6e8bb
PB
8100 return 0;
8101 }
8102 if (insn & (1 << 11)) {
8103 /* Second half of bl. */
8104 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8105 tmp = load_reg(s, 14);
6a0d8a1d 8106 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8107
7d1b0095 8108 tmp2 = tcg_temp_new_i32();
b0109805 8109 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8110 store_reg(s, 14, tmp2);
8111 gen_bx(s, tmp);
9ee6e8bb
PB
8112 return 0;
8113 }
8114 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8115 /* Instruction spans a page boundary. Implement it as two
8116 16-bit instructions in case the second half causes an
8117 prefetch abort. */
8118 offset = ((int32_t)insn << 21) >> 9;
396e467c 8119 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8120 return 0;
8121 }
8122 /* Fall through to 32-bit decode. */
8123 }
8124
d31dd73e 8125 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8126 s->pc += 2;
8127 insn |= (uint32_t)insn_hw1 << 16;
8128
8129 if ((insn & 0xf800e800) != 0xf000e800) {
8130 ARCH(6T2);
8131 }
8132
8133 rn = (insn >> 16) & 0xf;
8134 rs = (insn >> 12) & 0xf;
8135 rd = (insn >> 8) & 0xf;
8136 rm = insn & 0xf;
8137 switch ((insn >> 25) & 0xf) {
8138 case 0: case 1: case 2: case 3:
8139 /* 16-bit instructions. Should never happen. */
8140 abort();
8141 case 4:
8142 if (insn & (1 << 22)) {
8143 /* Other load/store, table branch. */
8144 if (insn & 0x01200000) {
8145 /* Load/store doubleword. */
8146 if (rn == 15) {
7d1b0095 8147 addr = tcg_temp_new_i32();
b0109805 8148 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8149 } else {
b0109805 8150 addr = load_reg(s, rn);
9ee6e8bb
PB
8151 }
8152 offset = (insn & 0xff) * 4;
8153 if ((insn & (1 << 23)) == 0)
8154 offset = -offset;
8155 if (insn & (1 << 24)) {
b0109805 8156 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8157 offset = 0;
8158 }
8159 if (insn & (1 << 20)) {
8160 /* ldrd */
e2592fad
PM
8161 tmp = tcg_temp_new_i32();
8162 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8163 store_reg(s, rs, tmp);
8164 tcg_gen_addi_i32(addr, addr, 4);
e2592fad
PM
8165 tmp = tcg_temp_new_i32();
8166 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 8167 store_reg(s, rd, tmp);
9ee6e8bb
PB
8168 } else {
8169 /* strd */
b0109805 8170 tmp = load_reg(s, rs);
e2592fad
PM
8171 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8172 tcg_temp_free_i32(tmp);
b0109805
PB
8173 tcg_gen_addi_i32(addr, addr, 4);
8174 tmp = load_reg(s, rd);
e2592fad
PM
8175 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8176 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8177 }
8178 if (insn & (1 << 21)) {
8179 /* Base writeback. */
8180 if (rn == 15)
8181 goto illegal_op;
b0109805
PB
8182 tcg_gen_addi_i32(addr, addr, offset - 4);
8183 store_reg(s, rn, addr);
8184 } else {
7d1b0095 8185 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8186 }
8187 } else if ((insn & (1 << 23)) == 0) {
8188 /* Load/store exclusive word. */
39d5492a 8189 addr = tcg_temp_local_new_i32();
98a46317 8190 load_reg_var(s, addr, rn);
426f5abc 8191 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8192 if (insn & (1 << 20)) {
426f5abc 8193 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8194 } else {
426f5abc 8195 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8196 }
39d5492a 8197 tcg_temp_free_i32(addr);
2359bf80 8198 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8199 /* Table Branch. */
8200 if (rn == 15) {
7d1b0095 8201 addr = tcg_temp_new_i32();
b0109805 8202 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8203 } else {
b0109805 8204 addr = load_reg(s, rn);
9ee6e8bb 8205 }
b26eefb6 8206 tmp = load_reg(s, rm);
b0109805 8207 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8208 if (insn & (1 << 4)) {
8209 /* tbh */
b0109805 8210 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8211 tcg_temp_free_i32(tmp);
e2592fad
PM
8212 tmp = tcg_temp_new_i32();
8213 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8214 } else { /* tbb */
7d1b0095 8215 tcg_temp_free_i32(tmp);
e2592fad
PM
8216 tmp = tcg_temp_new_i32();
8217 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8218 }
7d1b0095 8219 tcg_temp_free_i32(addr);
b0109805
PB
8220 tcg_gen_shli_i32(tmp, tmp, 1);
8221 tcg_gen_addi_i32(tmp, tmp, s->pc);
8222 store_reg(s, 15, tmp);
9ee6e8bb 8223 } else {
2359bf80 8224 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8225 op = (insn >> 4) & 0x3;
2359bf80
MR
8226 switch (op2) {
8227 case 0:
426f5abc 8228 goto illegal_op;
2359bf80
MR
8229 case 1:
8230 /* Load/store exclusive byte/halfword/doubleword */
8231 if (op == 2) {
8232 goto illegal_op;
8233 }
8234 ARCH(7);
8235 break;
8236 case 2:
8237 /* Load-acquire/store-release */
8238 if (op == 3) {
8239 goto illegal_op;
8240 }
8241 /* Fall through */
8242 case 3:
8243 /* Load-acquire/store-release exclusive */
8244 ARCH(8);
8245 break;
426f5abc 8246 }
39d5492a 8247 addr = tcg_temp_local_new_i32();
98a46317 8248 load_reg_var(s, addr, rn);
2359bf80
MR
8249 if (!(op2 & 1)) {
8250 if (insn & (1 << 20)) {
8251 tmp = tcg_temp_new_i32();
8252 switch (op) {
8253 case 0: /* ldab */
8254 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
8255 break;
8256 case 1: /* ldah */
8257 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
8258 break;
8259 case 2: /* lda */
8260 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8261 break;
8262 default:
8263 abort();
8264 }
8265 store_reg(s, rs, tmp);
8266 } else {
8267 tmp = load_reg(s, rs);
8268 switch (op) {
8269 case 0: /* stlb */
8270 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
8271 break;
8272 case 1: /* stlh */
8273 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
8274 break;
8275 case 2: /* stl */
8276 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8277 break;
8278 default:
8279 abort();
8280 }
8281 tcg_temp_free_i32(tmp);
8282 }
8283 } else if (insn & (1 << 20)) {
426f5abc 8284 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8285 } else {
426f5abc 8286 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8287 }
39d5492a 8288 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8289 }
8290 } else {
8291 /* Load/store multiple, RFE, SRS. */
8292 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8293 /* RFE, SRS: not available in user mode or on M profile */
8294 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8295 goto illegal_op;
00115976 8296 }
9ee6e8bb
PB
8297 if (insn & (1 << 20)) {
8298 /* rfe */
b0109805
PB
8299 addr = load_reg(s, rn);
8300 if ((insn & (1 << 24)) == 0)
8301 tcg_gen_addi_i32(addr, addr, -8);
8302 /* Load PC into tmp and CPSR into tmp2. */
e2592fad
PM
8303 tmp = tcg_temp_new_i32();
8304 tcg_gen_qemu_ld32u(tmp, addr, 0);
b0109805 8305 tcg_gen_addi_i32(addr, addr, 4);
e2592fad
PM
8306 tmp2 = tcg_temp_new_i32();
8307 tcg_gen_qemu_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8308 if (insn & (1 << 21)) {
8309 /* Base writeback. */
b0109805
PB
8310 if (insn & (1 << 24)) {
8311 tcg_gen_addi_i32(addr, addr, 4);
8312 } else {
8313 tcg_gen_addi_i32(addr, addr, -4);
8314 }
8315 store_reg(s, rn, addr);
8316 } else {
7d1b0095 8317 tcg_temp_free_i32(addr);
9ee6e8bb 8318 }
b0109805 8319 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8320 } else {
8321 /* srs */
81465888
PM
8322 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8323 insn & (1 << 21));
9ee6e8bb
PB
8324 }
8325 } else {
5856d44e 8326 int i, loaded_base = 0;
39d5492a 8327 TCGv_i32 loaded_var;
9ee6e8bb 8328 /* Load/store multiple. */
b0109805 8329 addr = load_reg(s, rn);
9ee6e8bb
PB
8330 offset = 0;
8331 for (i = 0; i < 16; i++) {
8332 if (insn & (1 << i))
8333 offset += 4;
8334 }
8335 if (insn & (1 << 24)) {
b0109805 8336 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8337 }
8338
39d5492a 8339 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8340 for (i = 0; i < 16; i++) {
8341 if ((insn & (1 << i)) == 0)
8342 continue;
8343 if (insn & (1 << 20)) {
8344 /* Load. */
e2592fad
PM
8345 tmp = tcg_temp_new_i32();
8346 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 8347 if (i == 15) {
b0109805 8348 gen_bx(s, tmp);
5856d44e
YO
8349 } else if (i == rn) {
8350 loaded_var = tmp;
8351 loaded_base = 1;
9ee6e8bb 8352 } else {
b0109805 8353 store_reg(s, i, tmp);
9ee6e8bb
PB
8354 }
8355 } else {
8356 /* Store. */
b0109805 8357 tmp = load_reg(s, i);
e2592fad
PM
8358 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
8359 tcg_temp_free_i32(tmp);
9ee6e8bb 8360 }
b0109805 8361 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8362 }
5856d44e
YO
8363 if (loaded_base) {
8364 store_reg(s, rn, loaded_var);
8365 }
9ee6e8bb
PB
8366 if (insn & (1 << 21)) {
8367 /* Base register writeback. */
8368 if (insn & (1 << 24)) {
b0109805 8369 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8370 }
8371 /* Fault if writeback register is in register list. */
8372 if (insn & (1 << rn))
8373 goto illegal_op;
b0109805
PB
8374 store_reg(s, rn, addr);
8375 } else {
7d1b0095 8376 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8377 }
8378 }
8379 }
8380 break;
2af9ab77
JB
8381 case 5:
8382
9ee6e8bb 8383 op = (insn >> 21) & 0xf;
2af9ab77
JB
8384 if (op == 6) {
8385 /* Halfword pack. */
8386 tmp = load_reg(s, rn);
8387 tmp2 = load_reg(s, rm);
8388 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8389 if (insn & (1 << 5)) {
8390 /* pkhtb */
8391 if (shift == 0)
8392 shift = 31;
8393 tcg_gen_sari_i32(tmp2, tmp2, shift);
8394 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8395 tcg_gen_ext16u_i32(tmp2, tmp2);
8396 } else {
8397 /* pkhbt */
8398 if (shift)
8399 tcg_gen_shli_i32(tmp2, tmp2, shift);
8400 tcg_gen_ext16u_i32(tmp, tmp);
8401 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8402 }
8403 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8404 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8405 store_reg(s, rd, tmp);
8406 } else {
2af9ab77
JB
8407 /* Data processing register constant shift. */
8408 if (rn == 15) {
7d1b0095 8409 tmp = tcg_temp_new_i32();
2af9ab77
JB
8410 tcg_gen_movi_i32(tmp, 0);
8411 } else {
8412 tmp = load_reg(s, rn);
8413 }
8414 tmp2 = load_reg(s, rm);
8415
8416 shiftop = (insn >> 4) & 3;
8417 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8418 conds = (insn & (1 << 20)) != 0;
8419 logic_cc = (conds && thumb2_logic_op(op));
8420 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8421 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8422 goto illegal_op;
7d1b0095 8423 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8424 if (rd != 15) {
8425 store_reg(s, rd, tmp);
8426 } else {
7d1b0095 8427 tcg_temp_free_i32(tmp);
2af9ab77 8428 }
3174f8e9 8429 }
9ee6e8bb
PB
8430 break;
8431 case 13: /* Misc data processing. */
8432 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8433 if (op < 4 && (insn & 0xf000) != 0xf000)
8434 goto illegal_op;
8435 switch (op) {
8436 case 0: /* Register controlled shift. */
8984bd2e
PB
8437 tmp = load_reg(s, rn);
8438 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8439 if ((insn & 0x70) != 0)
8440 goto illegal_op;
8441 op = (insn >> 21) & 3;
8984bd2e
PB
8442 logic_cc = (insn & (1 << 20)) != 0;
8443 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8444 if (logic_cc)
8445 gen_logic_CC(tmp);
21aeb343 8446 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8447 break;
8448 case 1: /* Sign/zero extend. */
5e3f878a 8449 tmp = load_reg(s, rm);
9ee6e8bb 8450 shift = (insn >> 4) & 3;
1301f322 8451 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8452 rotate, a shift is sufficient. */
8453 if (shift != 0)
f669df27 8454 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8455 op = (insn >> 20) & 7;
8456 switch (op) {
5e3f878a
PB
8457 case 0: gen_sxth(tmp); break;
8458 case 1: gen_uxth(tmp); break;
8459 case 2: gen_sxtb16(tmp); break;
8460 case 3: gen_uxtb16(tmp); break;
8461 case 4: gen_sxtb(tmp); break;
8462 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8463 default: goto illegal_op;
8464 }
8465 if (rn != 15) {
5e3f878a 8466 tmp2 = load_reg(s, rn);
9ee6e8bb 8467 if ((op >> 1) == 1) {
5e3f878a 8468 gen_add16(tmp, tmp2);
9ee6e8bb 8469 } else {
5e3f878a 8470 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8471 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8472 }
8473 }
5e3f878a 8474 store_reg(s, rd, tmp);
9ee6e8bb
PB
8475 break;
8476 case 2: /* SIMD add/subtract. */
8477 op = (insn >> 20) & 7;
8478 shift = (insn >> 4) & 7;
8479 if ((op & 3) == 3 || (shift & 3) == 3)
8480 goto illegal_op;
6ddbc6e4
PB
8481 tmp = load_reg(s, rn);
8482 tmp2 = load_reg(s, rm);
8483 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8484 tcg_temp_free_i32(tmp2);
6ddbc6e4 8485 store_reg(s, rd, tmp);
9ee6e8bb
PB
8486 break;
8487 case 3: /* Other data processing. */
8488 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8489 if (op < 4) {
8490 /* Saturating add/subtract. */
d9ba4830
PB
8491 tmp = load_reg(s, rn);
8492 tmp2 = load_reg(s, rm);
9ee6e8bb 8493 if (op & 1)
9ef39277 8494 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8495 if (op & 2)
9ef39277 8496 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8497 else
9ef39277 8498 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8499 tcg_temp_free_i32(tmp2);
9ee6e8bb 8500 } else {
d9ba4830 8501 tmp = load_reg(s, rn);
9ee6e8bb
PB
8502 switch (op) {
8503 case 0x0a: /* rbit */
d9ba4830 8504 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8505 break;
8506 case 0x08: /* rev */
66896cb8 8507 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8508 break;
8509 case 0x09: /* rev16 */
d9ba4830 8510 gen_rev16(tmp);
9ee6e8bb
PB
8511 break;
8512 case 0x0b: /* revsh */
d9ba4830 8513 gen_revsh(tmp);
9ee6e8bb
PB
8514 break;
8515 case 0x10: /* sel */
d9ba4830 8516 tmp2 = load_reg(s, rm);
7d1b0095 8517 tmp3 = tcg_temp_new_i32();
0ecb72a5 8518 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8519 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8520 tcg_temp_free_i32(tmp3);
8521 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8522 break;
8523 case 0x18: /* clz */
d9ba4830 8524 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8525 break;
8526 default:
8527 goto illegal_op;
8528 }
8529 }
d9ba4830 8530 store_reg(s, rd, tmp);
9ee6e8bb
PB
8531 break;
8532 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8533 op = (insn >> 4) & 0xf;
d9ba4830
PB
8534 tmp = load_reg(s, rn);
8535 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8536 switch ((insn >> 20) & 7) {
8537 case 0: /* 32 x 32 -> 32 */
d9ba4830 8538 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8539 tcg_temp_free_i32(tmp2);
9ee6e8bb 8540 if (rs != 15) {
d9ba4830 8541 tmp2 = load_reg(s, rs);
9ee6e8bb 8542 if (op)
d9ba4830 8543 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8544 else
d9ba4830 8545 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8546 tcg_temp_free_i32(tmp2);
9ee6e8bb 8547 }
9ee6e8bb
PB
8548 break;
8549 case 1: /* 16 x 16 -> 32 */
d9ba4830 8550 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8551 tcg_temp_free_i32(tmp2);
9ee6e8bb 8552 if (rs != 15) {
d9ba4830 8553 tmp2 = load_reg(s, rs);
9ef39277 8554 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8555 tcg_temp_free_i32(tmp2);
9ee6e8bb 8556 }
9ee6e8bb
PB
8557 break;
8558 case 2: /* Dual multiply add. */
8559 case 4: /* Dual multiply subtract. */
8560 if (op)
d9ba4830
PB
8561 gen_swap_half(tmp2);
8562 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8563 if (insn & (1 << 22)) {
e1d177b9 8564 /* This subtraction cannot overflow. */
d9ba4830 8565 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8566 } else {
e1d177b9
PM
8567 /* This addition cannot overflow 32 bits;
8568 * however it may overflow considered as a signed
8569 * operation, in which case we must set the Q flag.
8570 */
9ef39277 8571 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8572 }
7d1b0095 8573 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8574 if (rs != 15)
8575 {
d9ba4830 8576 tmp2 = load_reg(s, rs);
9ef39277 8577 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8578 tcg_temp_free_i32(tmp2);
9ee6e8bb 8579 }
9ee6e8bb
PB
8580 break;
8581 case 3: /* 32 * 16 -> 32msb */
8582 if (op)
d9ba4830 8583 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8584 else
d9ba4830 8585 gen_sxth(tmp2);
a7812ae4
PB
8586 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8587 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8588 tmp = tcg_temp_new_i32();
a7812ae4 8589 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8590 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8591 if (rs != 15)
8592 {
d9ba4830 8593 tmp2 = load_reg(s, rs);
9ef39277 8594 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8595 tcg_temp_free_i32(tmp2);
9ee6e8bb 8596 }
9ee6e8bb 8597 break;
838fa72d
AJ
8598 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8599 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8600 if (rs != 15) {
838fa72d
AJ
8601 tmp = load_reg(s, rs);
8602 if (insn & (1 << 20)) {
8603 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8604 } else {
838fa72d 8605 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8606 }
2c0262af 8607 }
838fa72d
AJ
8608 if (insn & (1 << 4)) {
8609 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8610 }
8611 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8612 tmp = tcg_temp_new_i32();
838fa72d
AJ
8613 tcg_gen_trunc_i64_i32(tmp, tmp64);
8614 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8615 break;
8616 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8617 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8618 tcg_temp_free_i32(tmp2);
9ee6e8bb 8619 if (rs != 15) {
d9ba4830
PB
8620 tmp2 = load_reg(s, rs);
8621 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8622 tcg_temp_free_i32(tmp2);
5fd46862 8623 }
9ee6e8bb 8624 break;
2c0262af 8625 }
d9ba4830 8626 store_reg(s, rd, tmp);
2c0262af 8627 break;
9ee6e8bb
PB
8628 case 6: case 7: /* 64-bit multiply, Divide. */
8629 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8630 tmp = load_reg(s, rn);
8631 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8632 if ((op & 0x50) == 0x10) {
8633 /* sdiv, udiv */
47789990 8634 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8635 goto illegal_op;
47789990 8636 }
9ee6e8bb 8637 if (op & 0x20)
5e3f878a 8638 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8639 else
5e3f878a 8640 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8641 tcg_temp_free_i32(tmp2);
5e3f878a 8642 store_reg(s, rd, tmp);
9ee6e8bb
PB
8643 } else if ((op & 0xe) == 0xc) {
8644 /* Dual multiply accumulate long. */
8645 if (op & 1)
5e3f878a
PB
8646 gen_swap_half(tmp2);
8647 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8648 if (op & 0x10) {
5e3f878a 8649 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8650 } else {
5e3f878a 8651 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8652 }
7d1b0095 8653 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8654 /* BUGFIX */
8655 tmp64 = tcg_temp_new_i64();
8656 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8657 tcg_temp_free_i32(tmp);
a7812ae4
PB
8658 gen_addq(s, tmp64, rs, rd);
8659 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8660 tcg_temp_free_i64(tmp64);
2c0262af 8661 } else {
9ee6e8bb
PB
8662 if (op & 0x20) {
8663 /* Unsigned 64-bit multiply */
a7812ae4 8664 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8665 } else {
9ee6e8bb
PB
8666 if (op & 8) {
8667 /* smlalxy */
5e3f878a 8668 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8669 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8670 tmp64 = tcg_temp_new_i64();
8671 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8672 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8673 } else {
8674 /* Signed 64-bit multiply */
a7812ae4 8675 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8676 }
b5ff1b31 8677 }
9ee6e8bb
PB
8678 if (op & 4) {
8679 /* umaal */
a7812ae4
PB
8680 gen_addq_lo(s, tmp64, rs);
8681 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8682 } else if (op & 0x40) {
8683 /* 64-bit accumulate. */
a7812ae4 8684 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8685 }
a7812ae4 8686 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8687 tcg_temp_free_i64(tmp64);
5fd46862 8688 }
2c0262af 8689 break;
9ee6e8bb
PB
8690 }
8691 break;
8692 case 6: case 7: case 14: case 15:
8693 /* Coprocessor. */
8694 if (((insn >> 24) & 3) == 3) {
8695 /* Translate into the equivalent ARM encoding. */
f06053e3 8696 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8697 if (disas_neon_data_insn(env, s, insn))
8698 goto illegal_op;
8699 } else {
8700 if (insn & (1 << 28))
8701 goto illegal_op;
8702 if (disas_coproc_insn (env, s, insn))
8703 goto illegal_op;
8704 }
8705 break;
8706 case 8: case 9: case 10: case 11:
8707 if (insn & (1 << 15)) {
8708 /* Branches, misc control. */
8709 if (insn & 0x5000) {
8710 /* Unconditional branch. */
8711 /* signextend(hw1[10:0]) -> offset[:12]. */
8712 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8713 /* hw1[10:0] -> offset[11:1]. */
8714 offset |= (insn & 0x7ff) << 1;
8715 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8716 offset[24:22] already have the same value because of the
8717 sign extension above. */
8718 offset ^= ((~insn) & (1 << 13)) << 10;
8719 offset ^= ((~insn) & (1 << 11)) << 11;
8720
9ee6e8bb
PB
8721 if (insn & (1 << 14)) {
8722 /* Branch and link. */
3174f8e9 8723 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8724 }
3b46e624 8725
b0109805 8726 offset += s->pc;
9ee6e8bb
PB
8727 if (insn & (1 << 12)) {
8728 /* b/bl */
b0109805 8729 gen_jmp(s, offset);
9ee6e8bb
PB
8730 } else {
8731 /* blx */
b0109805 8732 offset &= ~(uint32_t)2;
be5e7a76 8733 /* thumb2 bx, no need to check */
b0109805 8734 gen_bx_im(s, offset);
2c0262af 8735 }
9ee6e8bb
PB
8736 } else if (((insn >> 23) & 7) == 7) {
8737 /* Misc control */
8738 if (insn & (1 << 13))
8739 goto illegal_op;
8740
8741 if (insn & (1 << 26)) {
8742 /* Secure monitor call (v6Z) */
8743 goto illegal_op; /* not implemented. */
2c0262af 8744 } else {
9ee6e8bb
PB
8745 op = (insn >> 20) & 7;
8746 switch (op) {
8747 case 0: /* msr cpsr. */
8748 if (IS_M(env)) {
8984bd2e
PB
8749 tmp = load_reg(s, rn);
8750 addr = tcg_const_i32(insn & 0xff);
8751 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8752 tcg_temp_free_i32(addr);
7d1b0095 8753 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8754 gen_lookup_tb(s);
8755 break;
8756 }
8757 /* fall through */
8758 case 1: /* msr spsr. */
8759 if (IS_M(env))
8760 goto illegal_op;
2fbac54b
FN
8761 tmp = load_reg(s, rn);
8762 if (gen_set_psr(s,
9ee6e8bb 8763 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8764 op == 1, tmp))
9ee6e8bb
PB
8765 goto illegal_op;
8766 break;
8767 case 2: /* cps, nop-hint. */
8768 if (((insn >> 8) & 7) == 0) {
8769 gen_nop_hint(s, insn & 0xff);
8770 }
8771 /* Implemented as NOP in user mode. */
8772 if (IS_USER(s))
8773 break;
8774 offset = 0;
8775 imm = 0;
8776 if (insn & (1 << 10)) {
8777 if (insn & (1 << 7))
8778 offset |= CPSR_A;
8779 if (insn & (1 << 6))
8780 offset |= CPSR_I;
8781 if (insn & (1 << 5))
8782 offset |= CPSR_F;
8783 if (insn & (1 << 9))
8784 imm = CPSR_A | CPSR_I | CPSR_F;
8785 }
8786 if (insn & (1 << 8)) {
8787 offset |= 0x1f;
8788 imm |= (insn & 0x1f);
8789 }
8790 if (offset) {
2fbac54b 8791 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8792 }
8793 break;
8794 case 3: /* Special control operations. */
426f5abc 8795 ARCH(7);
9ee6e8bb
PB
8796 op = (insn >> 4) & 0xf;
8797 switch (op) {
8798 case 2: /* clrex */
426f5abc 8799 gen_clrex(s);
9ee6e8bb
PB
8800 break;
8801 case 4: /* dsb */
8802 case 5: /* dmb */
8803 case 6: /* isb */
8804 /* These execute as NOPs. */
9ee6e8bb
PB
8805 break;
8806 default:
8807 goto illegal_op;
8808 }
8809 break;
8810 case 4: /* bxj */
8811 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8812 tmp = load_reg(s, rn);
8813 gen_bx(s, tmp);
9ee6e8bb
PB
8814 break;
8815 case 5: /* Exception return. */
b8b45b68
RV
8816 if (IS_USER(s)) {
8817 goto illegal_op;
8818 }
8819 if (rn != 14 || rd != 15) {
8820 goto illegal_op;
8821 }
8822 tmp = load_reg(s, rn);
8823 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8824 gen_exception_return(s, tmp);
8825 break;
9ee6e8bb 8826 case 6: /* mrs cpsr. */
7d1b0095 8827 tmp = tcg_temp_new_i32();
9ee6e8bb 8828 if (IS_M(env)) {
8984bd2e
PB
8829 addr = tcg_const_i32(insn & 0xff);
8830 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8831 tcg_temp_free_i32(addr);
9ee6e8bb 8832 } else {
9ef39277 8833 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8834 }
8984bd2e 8835 store_reg(s, rd, tmp);
9ee6e8bb
PB
8836 break;
8837 case 7: /* mrs spsr. */
8838 /* Not accessible in user mode. */
8839 if (IS_USER(s) || IS_M(env))
8840 goto illegal_op;
d9ba4830
PB
8841 tmp = load_cpu_field(spsr);
8842 store_reg(s, rd, tmp);
9ee6e8bb 8843 break;
2c0262af
FB
8844 }
8845 }
9ee6e8bb
PB
8846 } else {
8847 /* Conditional branch. */
8848 op = (insn >> 22) & 0xf;
8849 /* Generate a conditional jump to next instruction. */
8850 s->condlabel = gen_new_label();
d9ba4830 8851 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8852 s->condjmp = 1;
8853
8854 /* offset[11:1] = insn[10:0] */
8855 offset = (insn & 0x7ff) << 1;
8856 /* offset[17:12] = insn[21:16]. */
8857 offset |= (insn & 0x003f0000) >> 4;
8858 /* offset[31:20] = insn[26]. */
8859 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8860 /* offset[18] = insn[13]. */
8861 offset |= (insn & (1 << 13)) << 5;
8862 /* offset[19] = insn[11]. */
8863 offset |= (insn & (1 << 11)) << 8;
8864
8865 /* jump to the offset */
b0109805 8866 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8867 }
8868 } else {
8869 /* Data processing immediate. */
8870 if (insn & (1 << 25)) {
8871 if (insn & (1 << 24)) {
8872 if (insn & (1 << 20))
8873 goto illegal_op;
8874 /* Bitfield/Saturate. */
8875 op = (insn >> 21) & 7;
8876 imm = insn & 0x1f;
8877 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8878 if (rn == 15) {
7d1b0095 8879 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8880 tcg_gen_movi_i32(tmp, 0);
8881 } else {
8882 tmp = load_reg(s, rn);
8883 }
9ee6e8bb
PB
8884 switch (op) {
8885 case 2: /* Signed bitfield extract. */
8886 imm++;
8887 if (shift + imm > 32)
8888 goto illegal_op;
8889 if (imm < 32)
6ddbc6e4 8890 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8891 break;
8892 case 6: /* Unsigned bitfield extract. */
8893 imm++;
8894 if (shift + imm > 32)
8895 goto illegal_op;
8896 if (imm < 32)
6ddbc6e4 8897 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8898 break;
8899 case 3: /* Bitfield insert/clear. */
8900 if (imm < shift)
8901 goto illegal_op;
8902 imm = imm + 1 - shift;
8903 if (imm != 32) {
6ddbc6e4 8904 tmp2 = load_reg(s, rd);
d593c48e 8905 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8906 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8907 }
8908 break;
8909 case 7:
8910 goto illegal_op;
8911 default: /* Saturate. */
9ee6e8bb
PB
8912 if (shift) {
8913 if (op & 1)
6ddbc6e4 8914 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8915 else
6ddbc6e4 8916 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8917 }
6ddbc6e4 8918 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8919 if (op & 4) {
8920 /* Unsigned. */
9ee6e8bb 8921 if ((op & 1) && shift == 0)
9ef39277 8922 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8923 else
9ef39277 8924 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8925 } else {
9ee6e8bb 8926 /* Signed. */
9ee6e8bb 8927 if ((op & 1) && shift == 0)
9ef39277 8928 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8929 else
9ef39277 8930 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8931 }
b75263d6 8932 tcg_temp_free_i32(tmp2);
9ee6e8bb 8933 break;
2c0262af 8934 }
6ddbc6e4 8935 store_reg(s, rd, tmp);
9ee6e8bb
PB
8936 } else {
8937 imm = ((insn & 0x04000000) >> 15)
8938 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8939 if (insn & (1 << 22)) {
8940 /* 16-bit immediate. */
8941 imm |= (insn >> 4) & 0xf000;
8942 if (insn & (1 << 23)) {
8943 /* movt */
5e3f878a 8944 tmp = load_reg(s, rd);
86831435 8945 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8946 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8947 } else {
9ee6e8bb 8948 /* movw */
7d1b0095 8949 tmp = tcg_temp_new_i32();
5e3f878a 8950 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8951 }
8952 } else {
9ee6e8bb
PB
8953 /* Add/sub 12-bit immediate. */
8954 if (rn == 15) {
b0109805 8955 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8956 if (insn & (1 << 23))
b0109805 8957 offset -= imm;
9ee6e8bb 8958 else
b0109805 8959 offset += imm;
7d1b0095 8960 tmp = tcg_temp_new_i32();
5e3f878a 8961 tcg_gen_movi_i32(tmp, offset);
2c0262af 8962 } else {
5e3f878a 8963 tmp = load_reg(s, rn);
9ee6e8bb 8964 if (insn & (1 << 23))
5e3f878a 8965 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8966 else
5e3f878a 8967 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8968 }
9ee6e8bb 8969 }
5e3f878a 8970 store_reg(s, rd, tmp);
191abaa2 8971 }
9ee6e8bb
PB
8972 } else {
8973 int shifter_out = 0;
8974 /* modified 12-bit immediate. */
8975 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8976 imm = (insn & 0xff);
8977 switch (shift) {
8978 case 0: /* XY */
8979 /* Nothing to do. */
8980 break;
8981 case 1: /* 00XY00XY */
8982 imm |= imm << 16;
8983 break;
8984 case 2: /* XY00XY00 */
8985 imm |= imm << 16;
8986 imm <<= 8;
8987 break;
8988 case 3: /* XYXYXYXY */
8989 imm |= imm << 16;
8990 imm |= imm << 8;
8991 break;
8992 default: /* Rotated constant. */
8993 shift = (shift << 1) | (imm >> 7);
8994 imm |= 0x80;
8995 imm = imm << (32 - shift);
8996 shifter_out = 1;
8997 break;
b5ff1b31 8998 }
7d1b0095 8999 tmp2 = tcg_temp_new_i32();
3174f8e9 9000 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9001 rn = (insn >> 16) & 0xf;
3174f8e9 9002 if (rn == 15) {
7d1b0095 9003 tmp = tcg_temp_new_i32();
3174f8e9
FN
9004 tcg_gen_movi_i32(tmp, 0);
9005 } else {
9006 tmp = load_reg(s, rn);
9007 }
9ee6e8bb
PB
9008 op = (insn >> 21) & 0xf;
9009 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9010 shifter_out, tmp, tmp2))
9ee6e8bb 9011 goto illegal_op;
7d1b0095 9012 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9013 rd = (insn >> 8) & 0xf;
9014 if (rd != 15) {
3174f8e9
FN
9015 store_reg(s, rd, tmp);
9016 } else {
7d1b0095 9017 tcg_temp_free_i32(tmp);
2c0262af 9018 }
2c0262af 9019 }
9ee6e8bb
PB
9020 }
9021 break;
9022 case 12: /* Load/store single data item. */
9023 {
9024 int postinc = 0;
9025 int writeback = 0;
b0109805 9026 int user;
9ee6e8bb
PB
9027 if ((insn & 0x01100000) == 0x01000000) {
9028 if (disas_neon_ls_insn(env, s, insn))
c1713132 9029 goto illegal_op;
9ee6e8bb
PB
9030 break;
9031 }
a2fdc890
PM
9032 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9033 if (rs == 15) {
9034 if (!(insn & (1 << 20))) {
9035 goto illegal_op;
9036 }
9037 if (op != 2) {
9038 /* Byte or halfword load space with dest == r15 : memory hints.
9039 * Catch them early so we don't emit pointless addressing code.
9040 * This space is a mix of:
9041 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9042 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9043 * cores)
9044 * unallocated hints, which must be treated as NOPs
9045 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9046 * which is easiest for the decoding logic
9047 * Some space which must UNDEF
9048 */
9049 int op1 = (insn >> 23) & 3;
9050 int op2 = (insn >> 6) & 0x3f;
9051 if (op & 2) {
9052 goto illegal_op;
9053 }
9054 if (rn == 15) {
02afbf64
PM
9055 /* UNPREDICTABLE, unallocated hint or
9056 * PLD/PLDW/PLI (literal)
9057 */
a2fdc890
PM
9058 return 0;
9059 }
9060 if (op1 & 1) {
02afbf64 9061 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9062 }
9063 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9064 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9065 }
9066 /* UNDEF space, or an UNPREDICTABLE */
9067 return 1;
9068 }
9069 }
b0109805 9070 user = IS_USER(s);
9ee6e8bb 9071 if (rn == 15) {
7d1b0095 9072 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9073 /* PC relative. */
9074 /* s->pc has already been incremented by 4. */
9075 imm = s->pc & 0xfffffffc;
9076 if (insn & (1 << 23))
9077 imm += insn & 0xfff;
9078 else
9079 imm -= insn & 0xfff;
b0109805 9080 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9081 } else {
b0109805 9082 addr = load_reg(s, rn);
9ee6e8bb
PB
9083 if (insn & (1 << 23)) {
9084 /* Positive offset. */
9085 imm = insn & 0xfff;
b0109805 9086 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9087 } else {
9ee6e8bb 9088 imm = insn & 0xff;
2a0308c5
PM
9089 switch ((insn >> 8) & 0xf) {
9090 case 0x0: /* Shifted Register. */
9ee6e8bb 9091 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9092 if (shift > 3) {
9093 tcg_temp_free_i32(addr);
18c9b560 9094 goto illegal_op;
2a0308c5 9095 }
b26eefb6 9096 tmp = load_reg(s, rm);
9ee6e8bb 9097 if (shift)
b26eefb6 9098 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9099 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9100 tcg_temp_free_i32(tmp);
9ee6e8bb 9101 break;
2a0308c5 9102 case 0xc: /* Negative offset. */
b0109805 9103 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9104 break;
2a0308c5 9105 case 0xe: /* User privilege. */
b0109805
PB
9106 tcg_gen_addi_i32(addr, addr, imm);
9107 user = 1;
9ee6e8bb 9108 break;
2a0308c5 9109 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9110 imm = -imm;
9111 /* Fall through. */
2a0308c5 9112 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9113 postinc = 1;
9114 writeback = 1;
9115 break;
2a0308c5 9116 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9117 imm = -imm;
9118 /* Fall through. */
2a0308c5 9119 case 0xf: /* Pre-increment. */
b0109805 9120 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9121 writeback = 1;
9122 break;
9123 default:
2a0308c5 9124 tcg_temp_free_i32(addr);
b7bcbe95 9125 goto illegal_op;
9ee6e8bb
PB
9126 }
9127 }
9128 }
9ee6e8bb
PB
9129 if (insn & (1 << 20)) {
9130 /* Load. */
5a839c0d 9131 tmp = tcg_temp_new_i32();
a2fdc890 9132 switch (op) {
5a839c0d
PM
9133 case 0:
9134 tcg_gen_qemu_ld8u(tmp, addr, user);
9135 break;
9136 case 4:
9137 tcg_gen_qemu_ld8s(tmp, addr, user);
9138 break;
9139 case 1:
9140 tcg_gen_qemu_ld16u(tmp, addr, user);
9141 break;
9142 case 5:
9143 tcg_gen_qemu_ld16s(tmp, addr, user);
9144 break;
9145 case 2:
9146 tcg_gen_qemu_ld32u(tmp, addr, user);
9147 break;
2a0308c5 9148 default:
5a839c0d 9149 tcg_temp_free_i32(tmp);
2a0308c5
PM
9150 tcg_temp_free_i32(addr);
9151 goto illegal_op;
a2fdc890
PM
9152 }
9153 if (rs == 15) {
9154 gen_bx(s, tmp);
9ee6e8bb 9155 } else {
a2fdc890 9156 store_reg(s, rs, tmp);
9ee6e8bb
PB
9157 }
9158 } else {
9159 /* Store. */
b0109805 9160 tmp = load_reg(s, rs);
9ee6e8bb 9161 switch (op) {
5a839c0d
PM
9162 case 0:
9163 tcg_gen_qemu_st8(tmp, addr, user);
9164 break;
9165 case 1:
9166 tcg_gen_qemu_st16(tmp, addr, user);
9167 break;
9168 case 2:
9169 tcg_gen_qemu_st32(tmp, addr, user);
9170 break;
2a0308c5 9171 default:
5a839c0d 9172 tcg_temp_free_i32(tmp);
2a0308c5
PM
9173 tcg_temp_free_i32(addr);
9174 goto illegal_op;
b7bcbe95 9175 }
5a839c0d 9176 tcg_temp_free_i32(tmp);
2c0262af 9177 }
9ee6e8bb 9178 if (postinc)
b0109805
PB
9179 tcg_gen_addi_i32(addr, addr, imm);
9180 if (writeback) {
9181 store_reg(s, rn, addr);
9182 } else {
7d1b0095 9183 tcg_temp_free_i32(addr);
b0109805 9184 }
9ee6e8bb
PB
9185 }
9186 break;
9187 default:
9188 goto illegal_op;
2c0262af 9189 }
9ee6e8bb
PB
9190 return 0;
9191illegal_op:
9192 return 1;
2c0262af
FB
9193}
9194
0ecb72a5 9195static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9196{
9197 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9198 int32_t offset;
9199 int i;
39d5492a
PM
9200 TCGv_i32 tmp;
9201 TCGv_i32 tmp2;
9202 TCGv_i32 addr;
99c475ab 9203
9ee6e8bb
PB
9204 if (s->condexec_mask) {
9205 cond = s->condexec_cond;
bedd2912
JB
9206 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9207 s->condlabel = gen_new_label();
9208 gen_test_cc(cond ^ 1, s->condlabel);
9209 s->condjmp = 1;
9210 }
9ee6e8bb
PB
9211 }
9212
d31dd73e 9213 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9214 s->pc += 2;
b5ff1b31 9215
99c475ab
FB
9216 switch (insn >> 12) {
9217 case 0: case 1:
396e467c 9218
99c475ab
FB
9219 rd = insn & 7;
9220 op = (insn >> 11) & 3;
9221 if (op == 3) {
9222 /* add/subtract */
9223 rn = (insn >> 3) & 7;
396e467c 9224 tmp = load_reg(s, rn);
99c475ab
FB
9225 if (insn & (1 << 10)) {
9226 /* immediate */
7d1b0095 9227 tmp2 = tcg_temp_new_i32();
396e467c 9228 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9229 } else {
9230 /* reg */
9231 rm = (insn >> 6) & 7;
396e467c 9232 tmp2 = load_reg(s, rm);
99c475ab 9233 }
9ee6e8bb
PB
9234 if (insn & (1 << 9)) {
9235 if (s->condexec_mask)
396e467c 9236 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9237 else
72485ec4 9238 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9239 } else {
9240 if (s->condexec_mask)
396e467c 9241 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9242 else
72485ec4 9243 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9244 }
7d1b0095 9245 tcg_temp_free_i32(tmp2);
396e467c 9246 store_reg(s, rd, tmp);
99c475ab
FB
9247 } else {
9248 /* shift immediate */
9249 rm = (insn >> 3) & 7;
9250 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9251 tmp = load_reg(s, rm);
9252 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9253 if (!s->condexec_mask)
9254 gen_logic_CC(tmp);
9255 store_reg(s, rd, tmp);
99c475ab
FB
9256 }
9257 break;
9258 case 2: case 3:
9259 /* arithmetic large immediate */
9260 op = (insn >> 11) & 3;
9261 rd = (insn >> 8) & 0x7;
396e467c 9262 if (op == 0) { /* mov */
7d1b0095 9263 tmp = tcg_temp_new_i32();
396e467c 9264 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9265 if (!s->condexec_mask)
396e467c
FN
9266 gen_logic_CC(tmp);
9267 store_reg(s, rd, tmp);
9268 } else {
9269 tmp = load_reg(s, rd);
7d1b0095 9270 tmp2 = tcg_temp_new_i32();
396e467c
FN
9271 tcg_gen_movi_i32(tmp2, insn & 0xff);
9272 switch (op) {
9273 case 1: /* cmp */
72485ec4 9274 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9275 tcg_temp_free_i32(tmp);
9276 tcg_temp_free_i32(tmp2);
396e467c
FN
9277 break;
9278 case 2: /* add */
9279 if (s->condexec_mask)
9280 tcg_gen_add_i32(tmp, tmp, tmp2);
9281 else
72485ec4 9282 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9283 tcg_temp_free_i32(tmp2);
396e467c
FN
9284 store_reg(s, rd, tmp);
9285 break;
9286 case 3: /* sub */
9287 if (s->condexec_mask)
9288 tcg_gen_sub_i32(tmp, tmp, tmp2);
9289 else
72485ec4 9290 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9291 tcg_temp_free_i32(tmp2);
396e467c
FN
9292 store_reg(s, rd, tmp);
9293 break;
9294 }
99c475ab 9295 }
99c475ab
FB
9296 break;
9297 case 4:
9298 if (insn & (1 << 11)) {
9299 rd = (insn >> 8) & 7;
5899f386
FB
9300 /* load pc-relative. Bit 1 of PC is ignored. */
9301 val = s->pc + 2 + ((insn & 0xff) * 4);
9302 val &= ~(uint32_t)2;
7d1b0095 9303 addr = tcg_temp_new_i32();
b0109805 9304 tcg_gen_movi_i32(addr, val);
c40c8556
PM
9305 tmp = tcg_temp_new_i32();
9306 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9307 tcg_temp_free_i32(addr);
b0109805 9308 store_reg(s, rd, tmp);
99c475ab
FB
9309 break;
9310 }
9311 if (insn & (1 << 10)) {
9312 /* data processing extended or blx */
9313 rd = (insn & 7) | ((insn >> 4) & 8);
9314 rm = (insn >> 3) & 0xf;
9315 op = (insn >> 8) & 3;
9316 switch (op) {
9317 case 0: /* add */
396e467c
FN
9318 tmp = load_reg(s, rd);
9319 tmp2 = load_reg(s, rm);
9320 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9321 tcg_temp_free_i32(tmp2);
396e467c 9322 store_reg(s, rd, tmp);
99c475ab
FB
9323 break;
9324 case 1: /* cmp */
396e467c
FN
9325 tmp = load_reg(s, rd);
9326 tmp2 = load_reg(s, rm);
72485ec4 9327 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9328 tcg_temp_free_i32(tmp2);
9329 tcg_temp_free_i32(tmp);
99c475ab
FB
9330 break;
9331 case 2: /* mov/cpy */
396e467c
FN
9332 tmp = load_reg(s, rm);
9333 store_reg(s, rd, tmp);
99c475ab
FB
9334 break;
9335 case 3:/* branch [and link] exchange thumb register */
b0109805 9336 tmp = load_reg(s, rm);
99c475ab 9337 if (insn & (1 << 7)) {
be5e7a76 9338 ARCH(5);
99c475ab 9339 val = (uint32_t)s->pc | 1;
7d1b0095 9340 tmp2 = tcg_temp_new_i32();
b0109805
PB
9341 tcg_gen_movi_i32(tmp2, val);
9342 store_reg(s, 14, tmp2);
99c475ab 9343 }
be5e7a76 9344 /* already thumb, no need to check */
d9ba4830 9345 gen_bx(s, tmp);
99c475ab
FB
9346 break;
9347 }
9348 break;
9349 }
9350
9351 /* data processing register */
9352 rd = insn & 7;
9353 rm = (insn >> 3) & 7;
9354 op = (insn >> 6) & 0xf;
9355 if (op == 2 || op == 3 || op == 4 || op == 7) {
9356 /* the shift/rotate ops want the operands backwards */
9357 val = rm;
9358 rm = rd;
9359 rd = val;
9360 val = 1;
9361 } else {
9362 val = 0;
9363 }
9364
396e467c 9365 if (op == 9) { /* neg */
7d1b0095 9366 tmp = tcg_temp_new_i32();
396e467c
FN
9367 tcg_gen_movi_i32(tmp, 0);
9368 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9369 tmp = load_reg(s, rd);
9370 } else {
39d5492a 9371 TCGV_UNUSED_I32(tmp);
396e467c 9372 }
99c475ab 9373
396e467c 9374 tmp2 = load_reg(s, rm);
5899f386 9375 switch (op) {
99c475ab 9376 case 0x0: /* and */
396e467c 9377 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9378 if (!s->condexec_mask)
396e467c 9379 gen_logic_CC(tmp);
99c475ab
FB
9380 break;
9381 case 0x1: /* eor */
396e467c 9382 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9383 if (!s->condexec_mask)
396e467c 9384 gen_logic_CC(tmp);
99c475ab
FB
9385 break;
9386 case 0x2: /* lsl */
9ee6e8bb 9387 if (s->condexec_mask) {
365af80e 9388 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9389 } else {
9ef39277 9390 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9391 gen_logic_CC(tmp2);
9ee6e8bb 9392 }
99c475ab
FB
9393 break;
9394 case 0x3: /* lsr */
9ee6e8bb 9395 if (s->condexec_mask) {
365af80e 9396 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9397 } else {
9ef39277 9398 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9399 gen_logic_CC(tmp2);
9ee6e8bb 9400 }
99c475ab
FB
9401 break;
9402 case 0x4: /* asr */
9ee6e8bb 9403 if (s->condexec_mask) {
365af80e 9404 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9405 } else {
9ef39277 9406 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9407 gen_logic_CC(tmp2);
9ee6e8bb 9408 }
99c475ab
FB
9409 break;
9410 case 0x5: /* adc */
49b4c31e 9411 if (s->condexec_mask) {
396e467c 9412 gen_adc(tmp, tmp2);
49b4c31e
RH
9413 } else {
9414 gen_adc_CC(tmp, tmp, tmp2);
9415 }
99c475ab
FB
9416 break;
9417 case 0x6: /* sbc */
2de68a49 9418 if (s->condexec_mask) {
396e467c 9419 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9420 } else {
9421 gen_sbc_CC(tmp, tmp, tmp2);
9422 }
99c475ab
FB
9423 break;
9424 case 0x7: /* ror */
9ee6e8bb 9425 if (s->condexec_mask) {
f669df27
AJ
9426 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9427 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9428 } else {
9ef39277 9429 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9430 gen_logic_CC(tmp2);
9ee6e8bb 9431 }
99c475ab
FB
9432 break;
9433 case 0x8: /* tst */
396e467c
FN
9434 tcg_gen_and_i32(tmp, tmp, tmp2);
9435 gen_logic_CC(tmp);
99c475ab 9436 rd = 16;
5899f386 9437 break;
99c475ab 9438 case 0x9: /* neg */
9ee6e8bb 9439 if (s->condexec_mask)
396e467c 9440 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9441 else
72485ec4 9442 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9443 break;
9444 case 0xa: /* cmp */
72485ec4 9445 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9446 rd = 16;
9447 break;
9448 case 0xb: /* cmn */
72485ec4 9449 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9450 rd = 16;
9451 break;
9452 case 0xc: /* orr */
396e467c 9453 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9454 if (!s->condexec_mask)
396e467c 9455 gen_logic_CC(tmp);
99c475ab
FB
9456 break;
9457 case 0xd: /* mul */
7b2919a0 9458 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9459 if (!s->condexec_mask)
396e467c 9460 gen_logic_CC(tmp);
99c475ab
FB
9461 break;
9462 case 0xe: /* bic */
f669df27 9463 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9464 if (!s->condexec_mask)
396e467c 9465 gen_logic_CC(tmp);
99c475ab
FB
9466 break;
9467 case 0xf: /* mvn */
396e467c 9468 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9469 if (!s->condexec_mask)
396e467c 9470 gen_logic_CC(tmp2);
99c475ab 9471 val = 1;
5899f386 9472 rm = rd;
99c475ab
FB
9473 break;
9474 }
9475 if (rd != 16) {
396e467c
FN
9476 if (val) {
9477 store_reg(s, rm, tmp2);
9478 if (op != 0xf)
7d1b0095 9479 tcg_temp_free_i32(tmp);
396e467c
FN
9480 } else {
9481 store_reg(s, rd, tmp);
7d1b0095 9482 tcg_temp_free_i32(tmp2);
396e467c
FN
9483 }
9484 } else {
7d1b0095
PM
9485 tcg_temp_free_i32(tmp);
9486 tcg_temp_free_i32(tmp2);
99c475ab
FB
9487 }
9488 break;
9489
9490 case 5:
9491 /* load/store register offset. */
9492 rd = insn & 7;
9493 rn = (insn >> 3) & 7;
9494 rm = (insn >> 6) & 7;
9495 op = (insn >> 9) & 7;
b0109805 9496 addr = load_reg(s, rn);
b26eefb6 9497 tmp = load_reg(s, rm);
b0109805 9498 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9499 tcg_temp_free_i32(tmp);
99c475ab 9500
c40c8556 9501 if (op < 3) { /* store */
b0109805 9502 tmp = load_reg(s, rd);
c40c8556
PM
9503 } else {
9504 tmp = tcg_temp_new_i32();
9505 }
99c475ab
FB
9506
9507 switch (op) {
9508 case 0: /* str */
c40c8556 9509 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9510 break;
9511 case 1: /* strh */
c40c8556 9512 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9513 break;
9514 case 2: /* strb */
c40c8556 9515 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9516 break;
9517 case 3: /* ldrsb */
c40c8556 9518 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
9519 break;
9520 case 4: /* ldr */
c40c8556 9521 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9522 break;
9523 case 5: /* ldrh */
c40c8556 9524 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
9525 break;
9526 case 6: /* ldrb */
c40c8556 9527 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
9528 break;
9529 case 7: /* ldrsh */
c40c8556 9530 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
9531 break;
9532 }
c40c8556 9533 if (op >= 3) { /* load */
b0109805 9534 store_reg(s, rd, tmp);
c40c8556
PM
9535 } else {
9536 tcg_temp_free_i32(tmp);
9537 }
7d1b0095 9538 tcg_temp_free_i32(addr);
99c475ab
FB
9539 break;
9540
9541 case 6:
9542 /* load/store word immediate offset */
9543 rd = insn & 7;
9544 rn = (insn >> 3) & 7;
b0109805 9545 addr = load_reg(s, rn);
99c475ab 9546 val = (insn >> 4) & 0x7c;
b0109805 9547 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9548
9549 if (insn & (1 << 11)) {
9550 /* load */
c40c8556
PM
9551 tmp = tcg_temp_new_i32();
9552 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9553 store_reg(s, rd, tmp);
99c475ab
FB
9554 } else {
9555 /* store */
b0109805 9556 tmp = load_reg(s, rd);
c40c8556
PM
9557 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9558 tcg_temp_free_i32(tmp);
99c475ab 9559 }
7d1b0095 9560 tcg_temp_free_i32(addr);
99c475ab
FB
9561 break;
9562
9563 case 7:
9564 /* load/store byte immediate offset */
9565 rd = insn & 7;
9566 rn = (insn >> 3) & 7;
b0109805 9567 addr = load_reg(s, rn);
99c475ab 9568 val = (insn >> 6) & 0x1f;
b0109805 9569 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9570
9571 if (insn & (1 << 11)) {
9572 /* load */
c40c8556
PM
9573 tmp = tcg_temp_new_i32();
9574 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
b0109805 9575 store_reg(s, rd, tmp);
99c475ab
FB
9576 } else {
9577 /* store */
b0109805 9578 tmp = load_reg(s, rd);
c40c8556
PM
9579 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9580 tcg_temp_free_i32(tmp);
99c475ab 9581 }
7d1b0095 9582 tcg_temp_free_i32(addr);
99c475ab
FB
9583 break;
9584
9585 case 8:
9586 /* load/store halfword immediate offset */
9587 rd = insn & 7;
9588 rn = (insn >> 3) & 7;
b0109805 9589 addr = load_reg(s, rn);
99c475ab 9590 val = (insn >> 5) & 0x3e;
b0109805 9591 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9592
9593 if (insn & (1 << 11)) {
9594 /* load */
c40c8556
PM
9595 tmp = tcg_temp_new_i32();
9596 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
b0109805 9597 store_reg(s, rd, tmp);
99c475ab
FB
9598 } else {
9599 /* store */
b0109805 9600 tmp = load_reg(s, rd);
c40c8556
PM
9601 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
9602 tcg_temp_free_i32(tmp);
99c475ab 9603 }
7d1b0095 9604 tcg_temp_free_i32(addr);
99c475ab
FB
9605 break;
9606
9607 case 9:
9608 /* load/store from stack */
9609 rd = (insn >> 8) & 7;
b0109805 9610 addr = load_reg(s, 13);
99c475ab 9611 val = (insn & 0xff) * 4;
b0109805 9612 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9613
9614 if (insn & (1 << 11)) {
9615 /* load */
c40c8556
PM
9616 tmp = tcg_temp_new_i32();
9617 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9618 store_reg(s, rd, tmp);
99c475ab
FB
9619 } else {
9620 /* store */
b0109805 9621 tmp = load_reg(s, rd);
c40c8556
PM
9622 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9623 tcg_temp_free_i32(tmp);
99c475ab 9624 }
7d1b0095 9625 tcg_temp_free_i32(addr);
99c475ab
FB
9626 break;
9627
9628 case 10:
9629 /* add to high reg */
9630 rd = (insn >> 8) & 7;
5899f386
FB
9631 if (insn & (1 << 11)) {
9632 /* SP */
5e3f878a 9633 tmp = load_reg(s, 13);
5899f386
FB
9634 } else {
9635 /* PC. bit 1 is ignored. */
7d1b0095 9636 tmp = tcg_temp_new_i32();
5e3f878a 9637 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9638 }
99c475ab 9639 val = (insn & 0xff) * 4;
5e3f878a
PB
9640 tcg_gen_addi_i32(tmp, tmp, val);
9641 store_reg(s, rd, tmp);
99c475ab
FB
9642 break;
9643
9644 case 11:
9645 /* misc */
9646 op = (insn >> 8) & 0xf;
9647 switch (op) {
9648 case 0:
9649 /* adjust stack pointer */
b26eefb6 9650 tmp = load_reg(s, 13);
99c475ab
FB
9651 val = (insn & 0x7f) * 4;
9652 if (insn & (1 << 7))
6a0d8a1d 9653 val = -(int32_t)val;
b26eefb6
PB
9654 tcg_gen_addi_i32(tmp, tmp, val);
9655 store_reg(s, 13, tmp);
99c475ab
FB
9656 break;
9657
9ee6e8bb
PB
9658 case 2: /* sign/zero extend. */
9659 ARCH(6);
9660 rd = insn & 7;
9661 rm = (insn >> 3) & 7;
b0109805 9662 tmp = load_reg(s, rm);
9ee6e8bb 9663 switch ((insn >> 6) & 3) {
b0109805
PB
9664 case 0: gen_sxth(tmp); break;
9665 case 1: gen_sxtb(tmp); break;
9666 case 2: gen_uxth(tmp); break;
9667 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9668 }
b0109805 9669 store_reg(s, rd, tmp);
9ee6e8bb 9670 break;
99c475ab
FB
9671 case 4: case 5: case 0xc: case 0xd:
9672 /* push/pop */
b0109805 9673 addr = load_reg(s, 13);
5899f386
FB
9674 if (insn & (1 << 8))
9675 offset = 4;
99c475ab 9676 else
5899f386
FB
9677 offset = 0;
9678 for (i = 0; i < 8; i++) {
9679 if (insn & (1 << i))
9680 offset += 4;
9681 }
9682 if ((insn & (1 << 11)) == 0) {
b0109805 9683 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9684 }
99c475ab
FB
9685 for (i = 0; i < 8; i++) {
9686 if (insn & (1 << i)) {
9687 if (insn & (1 << 11)) {
9688 /* pop */
c40c8556
PM
9689 tmp = tcg_temp_new_i32();
9690 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9691 store_reg(s, i, tmp);
99c475ab
FB
9692 } else {
9693 /* push */
b0109805 9694 tmp = load_reg(s, i);
c40c8556
PM
9695 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9696 tcg_temp_free_i32(tmp);
99c475ab 9697 }
5899f386 9698 /* advance to the next address. */
b0109805 9699 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9700 }
9701 }
39d5492a 9702 TCGV_UNUSED_I32(tmp);
99c475ab
FB
9703 if (insn & (1 << 8)) {
9704 if (insn & (1 << 11)) {
9705 /* pop pc */
c40c8556
PM
9706 tmp = tcg_temp_new_i32();
9707 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9708 /* don't set the pc until the rest of the instruction
9709 has completed */
9710 } else {
9711 /* push lr */
b0109805 9712 tmp = load_reg(s, 14);
c40c8556
PM
9713 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9714 tcg_temp_free_i32(tmp);
99c475ab 9715 }
b0109805 9716 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9717 }
5899f386 9718 if ((insn & (1 << 11)) == 0) {
b0109805 9719 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9720 }
99c475ab 9721 /* write back the new stack pointer */
b0109805 9722 store_reg(s, 13, addr);
99c475ab 9723 /* set the new PC value */
be5e7a76
DES
9724 if ((insn & 0x0900) == 0x0900) {
9725 store_reg_from_load(env, s, 15, tmp);
9726 }
99c475ab
FB
9727 break;
9728
9ee6e8bb
PB
9729 case 1: case 3: case 9: case 11: /* czb */
9730 rm = insn & 7;
d9ba4830 9731 tmp = load_reg(s, rm);
9ee6e8bb
PB
9732 s->condlabel = gen_new_label();
9733 s->condjmp = 1;
9734 if (insn & (1 << 11))
cb63669a 9735 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9736 else
cb63669a 9737 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9738 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9739 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9740 val = (uint32_t)s->pc + 2;
9741 val += offset;
9742 gen_jmp(s, val);
9743 break;
9744
9745 case 15: /* IT, nop-hint. */
9746 if ((insn & 0xf) == 0) {
9747 gen_nop_hint(s, (insn >> 4) & 0xf);
9748 break;
9749 }
9750 /* If Then. */
9751 s->condexec_cond = (insn >> 4) & 0xe;
9752 s->condexec_mask = insn & 0x1f;
9753 /* No actual code generated for this insn, just setup state. */
9754 break;
9755
06c949e6 9756 case 0xe: /* bkpt */
be5e7a76 9757 ARCH(5);
bc4a0de0 9758 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9759 break;
9760
9ee6e8bb
PB
9761 case 0xa: /* rev */
9762 ARCH(6);
9763 rn = (insn >> 3) & 0x7;
9764 rd = insn & 0x7;
b0109805 9765 tmp = load_reg(s, rn);
9ee6e8bb 9766 switch ((insn >> 6) & 3) {
66896cb8 9767 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9768 case 1: gen_rev16(tmp); break;
9769 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9770 default: goto illegal_op;
9771 }
b0109805 9772 store_reg(s, rd, tmp);
9ee6e8bb
PB
9773 break;
9774
d9e028c1
PM
9775 case 6:
9776 switch ((insn >> 5) & 7) {
9777 case 2:
9778 /* setend */
9779 ARCH(6);
10962fd5
PM
9780 if (((insn >> 3) & 1) != s->bswap_code) {
9781 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9782 goto illegal_op;
9783 }
9ee6e8bb 9784 break;
d9e028c1
PM
9785 case 3:
9786 /* cps */
9787 ARCH(6);
9788 if (IS_USER(s)) {
9789 break;
8984bd2e 9790 }
d9e028c1
PM
9791 if (IS_M(env)) {
9792 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9793 /* FAULTMASK */
9794 if (insn & 1) {
9795 addr = tcg_const_i32(19);
9796 gen_helper_v7m_msr(cpu_env, addr, tmp);
9797 tcg_temp_free_i32(addr);
9798 }
9799 /* PRIMASK */
9800 if (insn & 2) {
9801 addr = tcg_const_i32(16);
9802 gen_helper_v7m_msr(cpu_env, addr, tmp);
9803 tcg_temp_free_i32(addr);
9804 }
9805 tcg_temp_free_i32(tmp);
9806 gen_lookup_tb(s);
9807 } else {
9808 if (insn & (1 << 4)) {
9809 shift = CPSR_A | CPSR_I | CPSR_F;
9810 } else {
9811 shift = 0;
9812 }
9813 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9814 }
d9e028c1
PM
9815 break;
9816 default:
9817 goto undef;
9ee6e8bb
PB
9818 }
9819 break;
9820
99c475ab
FB
9821 default:
9822 goto undef;
9823 }
9824 break;
9825
9826 case 12:
a7d3970d 9827 {
99c475ab 9828 /* load/store multiple */
39d5492a
PM
9829 TCGv_i32 loaded_var;
9830 TCGV_UNUSED_I32(loaded_var);
99c475ab 9831 rn = (insn >> 8) & 0x7;
b0109805 9832 addr = load_reg(s, rn);
99c475ab
FB
9833 for (i = 0; i < 8; i++) {
9834 if (insn & (1 << i)) {
99c475ab
FB
9835 if (insn & (1 << 11)) {
9836 /* load */
c40c8556
PM
9837 tmp = tcg_temp_new_i32();
9838 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
9839 if (i == rn) {
9840 loaded_var = tmp;
9841 } else {
9842 store_reg(s, i, tmp);
9843 }
99c475ab
FB
9844 } else {
9845 /* store */
b0109805 9846 tmp = load_reg(s, i);
c40c8556
PM
9847 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9848 tcg_temp_free_i32(tmp);
99c475ab 9849 }
5899f386 9850 /* advance to the next address */
b0109805 9851 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9852 }
9853 }
b0109805 9854 if ((insn & (1 << rn)) == 0) {
a7d3970d 9855 /* base reg not in list: base register writeback */
b0109805
PB
9856 store_reg(s, rn, addr);
9857 } else {
a7d3970d
PM
9858 /* base reg in list: if load, complete it now */
9859 if (insn & (1 << 11)) {
9860 store_reg(s, rn, loaded_var);
9861 }
7d1b0095 9862 tcg_temp_free_i32(addr);
b0109805 9863 }
99c475ab 9864 break;
a7d3970d 9865 }
99c475ab
FB
9866 case 13:
9867 /* conditional branch or swi */
9868 cond = (insn >> 8) & 0xf;
9869 if (cond == 0xe)
9870 goto undef;
9871
9872 if (cond == 0xf) {
9873 /* swi */
422ebf69 9874 gen_set_pc_im(s->pc);
9ee6e8bb 9875 s->is_jmp = DISAS_SWI;
99c475ab
FB
9876 break;
9877 }
9878 /* generate a conditional jump to next instruction */
e50e6a20 9879 s->condlabel = gen_new_label();
d9ba4830 9880 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9881 s->condjmp = 1;
99c475ab
FB
9882
9883 /* jump to the offset */
5899f386 9884 val = (uint32_t)s->pc + 2;
99c475ab 9885 offset = ((int32_t)insn << 24) >> 24;
5899f386 9886 val += offset << 1;
8aaca4c0 9887 gen_jmp(s, val);
99c475ab
FB
9888 break;
9889
9890 case 14:
358bf29e 9891 if (insn & (1 << 11)) {
9ee6e8bb
PB
9892 if (disas_thumb2_insn(env, s, insn))
9893 goto undef32;
358bf29e
PB
9894 break;
9895 }
9ee6e8bb 9896 /* unconditional branch */
99c475ab
FB
9897 val = (uint32_t)s->pc;
9898 offset = ((int32_t)insn << 21) >> 21;
9899 val += (offset << 1) + 2;
8aaca4c0 9900 gen_jmp(s, val);
99c475ab
FB
9901 break;
9902
9903 case 15:
9ee6e8bb 9904 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9905 goto undef32;
9ee6e8bb 9906 break;
99c475ab
FB
9907 }
9908 return;
9ee6e8bb 9909undef32:
bc4a0de0 9910 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9911 return;
9912illegal_op:
99c475ab 9913undef:
bc4a0de0 9914 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9915}
9916
2c0262af
FB
9917/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9918 basic block 'tb'. If search_pc is TRUE, also generate PC
9919 information for each intermediate instruction. */
5639c3f2 9920static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 9921 TranslationBlock *tb,
5639c3f2 9922 bool search_pc)
2c0262af 9923{
ed2803da 9924 CPUState *cs = CPU(cpu);
5639c3f2 9925 CPUARMState *env = &cpu->env;
2c0262af 9926 DisasContext dc1, *dc = &dc1;
a1d1bb31 9927 CPUBreakpoint *bp;
2c0262af
FB
9928 uint16_t *gen_opc_end;
9929 int j, lj;
0fa85d43 9930 target_ulong pc_start;
b5ff1b31 9931 uint32_t next_page_start;
2e70f6ef
PB
9932 int num_insns;
9933 int max_insns;
3b46e624 9934
2c0262af 9935 /* generate intermediate code */
0fa85d43 9936 pc_start = tb->pc;
3b46e624 9937
2c0262af
FB
9938 dc->tb = tb;
9939
92414b31 9940 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9941
9942 dc->is_jmp = DISAS_NEXT;
9943 dc->pc = pc_start;
ed2803da 9944 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 9945 dc->condjmp = 0;
7204ab88 9946 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 9947 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
9948 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9949 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9950#if !defined(CONFIG_USER_ONLY)
61f74d6a 9951 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9952#endif
5df8bac1 9953 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9954 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9955 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9956 cpu_F0s = tcg_temp_new_i32();
9957 cpu_F1s = tcg_temp_new_i32();
9958 cpu_F0d = tcg_temp_new_i64();
9959 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9960 cpu_V0 = cpu_F0d;
9961 cpu_V1 = cpu_F1d;
e677137d 9962 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9963 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9964 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9965 lj = -1;
2e70f6ef
PB
9966 num_insns = 0;
9967 max_insns = tb->cflags & CF_COUNT_MASK;
9968 if (max_insns == 0)
9969 max_insns = CF_COUNT_MASK;
9970
806f352d 9971 gen_tb_start();
e12ce78d 9972
3849902c
PM
9973 tcg_clear_temp_count();
9974
e12ce78d
PM
9975 /* A note on handling of the condexec (IT) bits:
9976 *
9977 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9978 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9979 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9980 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9981 * to do it at the end of the block. (For example if we don't do this
9982 * it's hard to identify whether we can safely skip writing condexec
9983 * at the end of the TB, which we definitely want to do for the case
9984 * where a TB doesn't do anything with the IT state at all.)
9985 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9986 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9987 * This is done both for leaving the TB at the end, and for leaving
9988 * it because of an exception we know will happen, which is done in
9989 * gen_exception_insn(). The latter is necessary because we need to
9990 * leave the TB with the PC/IT state just prior to execution of the
9991 * instruction which caused the exception.
9992 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9993 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9994 * This is handled in the same way as restoration of the
9995 * PC in these situations: we will be called again with search_pc=1
9996 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9997 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9998 * this to restore the condexec bits.
e12ce78d
PM
9999 *
10000 * Note that there are no instructions which can read the condexec
10001 * bits, and none which can write non-static values to them, so
0ecb72a5 10002 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10003 * middle of a TB.
10004 */
10005
9ee6e8bb
PB
10006 /* Reset the conditional execution bits immediately. This avoids
10007 complications trying to do it at the end of the block. */
98eac7ca 10008 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10009 {
39d5492a 10010 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10011 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10012 store_cpu_field(tmp, condexec_bits);
8f01245e 10013 }
2c0262af 10014 do {
fbb4a2e3
PB
10015#ifdef CONFIG_USER_ONLY
10016 /* Intercept jump to the magic kernel page. */
10017 if (dc->pc >= 0xffff0000) {
10018 /* We always get here via a jump, so know we are not in a
10019 conditional execution block. */
10020 gen_exception(EXCP_KERNEL_TRAP);
10021 dc->is_jmp = DISAS_UPDATE;
10022 break;
10023 }
10024#else
9ee6e8bb
PB
10025 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10026 /* We always get here via a jump, so know we are not in a
10027 conditional execution block. */
d9ba4830 10028 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10029 dc->is_jmp = DISAS_UPDATE;
10030 break;
9ee6e8bb
PB
10031 }
10032#endif
10033
72cf2d4f
BS
10034 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10035 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 10036 if (bp->pc == dc->pc) {
bc4a0de0 10037 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10038 /* Advance PC so that clearing the breakpoint will
10039 invalidate this TB. */
10040 dc->pc += 2;
10041 goto done_generating;
1fddef4b
FB
10042 }
10043 }
10044 }
2c0262af 10045 if (search_pc) {
92414b31 10046 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10047 if (lj < j) {
10048 lj++;
10049 while (lj < j)
ab1103de 10050 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10051 }
25983cad 10052 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10053 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 10054 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10055 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10056 }
e50e6a20 10057
2e70f6ef
PB
10058 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10059 gen_io_start();
10060
fdefe51c 10061 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10062 tcg_gen_debug_insn_start(dc->pc);
10063 }
10064
7204ab88 10065 if (dc->thumb) {
9ee6e8bb
PB
10066 disas_thumb_insn(env, dc);
10067 if (dc->condexec_mask) {
10068 dc->condexec_cond = (dc->condexec_cond & 0xe)
10069 | ((dc->condexec_mask >> 4) & 1);
10070 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10071 if (dc->condexec_mask == 0) {
10072 dc->condexec_cond = 0;
10073 }
10074 }
10075 } else {
10076 disas_arm_insn(env, dc);
10077 }
e50e6a20
FB
10078
10079 if (dc->condjmp && !dc->is_jmp) {
10080 gen_set_label(dc->condlabel);
10081 dc->condjmp = 0;
10082 }
3849902c
PM
10083
10084 if (tcg_check_temp_count()) {
10085 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
10086 }
10087
aaf2d97d 10088 /* Translation stops when a conditional branch is encountered.
e50e6a20 10089 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10090 * Also stop translation when a page boundary is reached. This
bf20dc07 10091 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10092 num_insns ++;
efd7f486 10093 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 10094 !cs->singlestep_enabled &&
1b530a6d 10095 !singlestep &&
2e70f6ef
PB
10096 dc->pc < next_page_start &&
10097 num_insns < max_insns);
10098
10099 if (tb->cflags & CF_LAST_IO) {
10100 if (dc->condjmp) {
10101 /* FIXME: This can theoretically happen with self-modifying
10102 code. */
10103 cpu_abort(env, "IO on conditional branch instruction");
10104 }
10105 gen_io_end();
10106 }
9ee6e8bb 10107
b5ff1b31 10108 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10109 instruction was a conditional branch or trap, and the PC has
10110 already been written. */
ed2803da 10111 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 10112 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10113 if (dc->condjmp) {
9ee6e8bb
PB
10114 gen_set_condexec(dc);
10115 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10116 gen_exception(EXCP_SWI);
9ee6e8bb 10117 } else {
d9ba4830 10118 gen_exception(EXCP_DEBUG);
9ee6e8bb 10119 }
e50e6a20
FB
10120 gen_set_label(dc->condlabel);
10121 }
10122 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 10123 gen_set_pc_im(dc->pc);
e50e6a20 10124 dc->condjmp = 0;
8aaca4c0 10125 }
9ee6e8bb
PB
10126 gen_set_condexec(dc);
10127 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10128 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10129 } else {
10130 /* FIXME: Single stepping a WFI insn will not halt
10131 the CPU. */
d9ba4830 10132 gen_exception(EXCP_DEBUG);
9ee6e8bb 10133 }
8aaca4c0 10134 } else {
9ee6e8bb
PB
10135 /* While branches must always occur at the end of an IT block,
10136 there are a few other things that can cause us to terminate
65626741 10137 the TB in the middle of an IT block:
9ee6e8bb
PB
10138 - Exception generating instructions (bkpt, swi, undefined).
10139 - Page boundaries.
10140 - Hardware watchpoints.
10141 Hardware breakpoints have already been handled and skip this code.
10142 */
10143 gen_set_condexec(dc);
8aaca4c0 10144 switch(dc->is_jmp) {
8aaca4c0 10145 case DISAS_NEXT:
6e256c93 10146 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10147 break;
10148 default:
10149 case DISAS_JUMP:
10150 case DISAS_UPDATE:
10151 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10152 tcg_gen_exit_tb(0);
8aaca4c0
FB
10153 break;
10154 case DISAS_TB_JUMP:
10155 /* nothing more to generate */
10156 break;
9ee6e8bb 10157 case DISAS_WFI:
1ce94f81 10158 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10159 break;
10160 case DISAS_SWI:
d9ba4830 10161 gen_exception(EXCP_SWI);
9ee6e8bb 10162 break;
8aaca4c0 10163 }
e50e6a20
FB
10164 if (dc->condjmp) {
10165 gen_set_label(dc->condlabel);
9ee6e8bb 10166 gen_set_condexec(dc);
6e256c93 10167 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10168 dc->condjmp = 0;
10169 }
2c0262af 10170 }
2e70f6ef 10171
9ee6e8bb 10172done_generating:
806f352d 10173 gen_tb_end(tb, num_insns);
efd7f486 10174 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10175
10176#ifdef DEBUG_DISAS
8fec2b8c 10177 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10178 qemu_log("----------------\n");
10179 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10180 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10181 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10182 qemu_log("\n");
2c0262af
FB
10183 }
10184#endif
b5ff1b31 10185 if (search_pc) {
92414b31 10186 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10187 lj++;
10188 while (lj <= j)
ab1103de 10189 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10190 } else {
2c0262af 10191 tb->size = dc->pc - pc_start;
2e70f6ef 10192 tb->icount = num_insns;
b5ff1b31 10193 }
2c0262af
FB
10194}
10195
0ecb72a5 10196void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10197{
5639c3f2 10198 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10199}
10200
0ecb72a5 10201void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10202{
5639c3f2 10203 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10204}
10205
b5ff1b31
FB
10206static const char *cpu_mode_names[16] = {
10207 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10208 "???", "???", "???", "und", "???", "???", "???", "sys"
10209};
9ee6e8bb 10210
878096ee
AF
10211void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10212 int flags)
2c0262af 10213{
878096ee
AF
10214 ARMCPU *cpu = ARM_CPU(cs);
10215 CPUARMState *env = &cpu->env;
2c0262af 10216 int i;
b5ff1b31 10217 uint32_t psr;
2c0262af
FB
10218
10219 for(i=0;i<16;i++) {
7fe48483 10220 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10221 if ((i % 4) == 3)
7fe48483 10222 cpu_fprintf(f, "\n");
2c0262af 10223 else
7fe48483 10224 cpu_fprintf(f, " ");
2c0262af 10225 }
b5ff1b31 10226 psr = cpsr_read(env);
687fa640
TS
10227 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10228 psr,
b5ff1b31
FB
10229 psr & (1 << 31) ? 'N' : '-',
10230 psr & (1 << 30) ? 'Z' : '-',
10231 psr & (1 << 29) ? 'C' : '-',
10232 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10233 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10234 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10235
f2617cfc
PM
10236 if (flags & CPU_DUMP_FPU) {
10237 int numvfpregs = 0;
10238 if (arm_feature(env, ARM_FEATURE_VFP)) {
10239 numvfpregs += 16;
10240 }
10241 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10242 numvfpregs += 16;
10243 }
10244 for (i = 0; i < numvfpregs; i++) {
10245 uint64_t v = float64_val(env->vfp.regs[i]);
10246 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10247 i * 2, (uint32_t)v,
10248 i * 2 + 1, (uint32_t)(v >> 32),
10249 i, v);
10250 }
10251 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10252 }
2c0262af 10253}
a6b025d3 10254
0ecb72a5 10255void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10256{
25983cad 10257 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10258 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10259}