]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
hw/i386/Makefile.obj: use $(PYTHON) to run .py scripts consistently
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
534df156 31#include "qemu/bitops.h"
1497c961 32
7b59220e 33#include "helper.h"
1497c961 34#define GEN_HELPER 1
7b59220e 35#include "helper.h"
2c0262af 36
be5e7a76
DES
37#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39/* currently all emulated v5 cores are also v5TE, so don't bother */
40#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
41#define ENABLE_ARCH_5J 0
42#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 46#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 47
86753403 48#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 49
f570c61e 50#include "translate.h"
e12ce78d
PM
51static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
52
b5ff1b31
FB
53#if defined(CONFIG_USER_ONLY)
54#define IS_USER(s) 1
55#else
56#define IS_USER(s) (s->user)
57#endif
58
9ee6e8bb 59/* These instructions trap after executing, so defer them until after the
b90372ad 60 conditional execution state has been updated. */
9ee6e8bb
PB
61#define DISAS_WFI 4
62#define DISAS_SWI 5
2c0262af 63
3407ad0e 64TCGv_ptr cpu_env;
ad69471c 65/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 66static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 67static TCGv_i32 cpu_R[16];
66c374de 68static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
69static TCGv_i32 cpu_exclusive_addr;
70static TCGv_i32 cpu_exclusive_val;
71static TCGv_i32 cpu_exclusive_high;
72#ifdef CONFIG_USER_ONLY
73static TCGv_i32 cpu_exclusive_test;
74static TCGv_i32 cpu_exclusive_info;
75#endif
ad69471c 76
b26eefb6 77/* FIXME: These should be removed. */
39d5492a 78static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 79static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 80
022c62cb 81#include "exec/gen-icount.h"
2e70f6ef 82
155c3eac
FN
83static const char *regnames[] =
84 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
85 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
86
b26eefb6
PB
87/* initialize TCG globals. */
88void arm_translate_init(void)
89{
155c3eac
FN
90 int i;
91
a7812ae4
PB
92 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
93
155c3eac
FN
94 for (i = 0; i < 16; i++) {
95 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 96 offsetof(CPUARMState, regs[i]),
155c3eac
FN
97 regnames[i]);
98 }
66c374de
AJ
99 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
100 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
101 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
102 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
103
426f5abc 104 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 106 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 107 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 108 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 109 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
110#ifdef CONFIG_USER_ONLY
111 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 112 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 113 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 114 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 115#endif
155c3eac 116
14ade10f 117 a64_translate_init();
b26eefb6
PB
118}
119
39d5492a 120static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 121{
39d5492a 122 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
123 tcg_gen_ld_i32(tmp, cpu_env, offset);
124 return tmp;
125}
126
0ecb72a5 127#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 128
39d5492a 129static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
130{
131 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 132 tcg_temp_free_i32(var);
d9ba4830
PB
133}
134
135#define store_cpu_field(var, name) \
0ecb72a5 136 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 137
b26eefb6 138/* Set a variable to the value of a CPU register. */
39d5492a 139static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
140{
141 if (reg == 15) {
142 uint32_t addr;
b90372ad 143 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
144 if (s->thumb)
145 addr = (long)s->pc + 2;
146 else
147 addr = (long)s->pc + 4;
148 tcg_gen_movi_i32(var, addr);
149 } else {
155c3eac 150 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
151 }
152}
153
154/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 155static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 156{
39d5492a 157 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
158 load_reg_var(s, tmp, reg);
159 return tmp;
160}
161
162/* Set a CPU register. The source must be a temporary and will be
163 marked as dead. */
39d5492a 164static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
165{
166 if (reg == 15) {
167 tcg_gen_andi_i32(var, var, ~1);
168 s->is_jmp = DISAS_JUMP;
169 }
155c3eac 170 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 171 tcg_temp_free_i32(var);
b26eefb6
PB
172}
173
b26eefb6 174/* Value extensions. */
86831435
PB
175#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
176#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
177#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
178#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
179
1497c961
PB
180#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
181#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 182
b26eefb6 183
39d5492a 184static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 185{
39d5492a 186 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 187 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
188 tcg_temp_free_i32(tmp_mask);
189}
d9ba4830
PB
190/* Set NZCV flags from the high 4 bits of var. */
191#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
192
193static void gen_exception(int excp)
194{
39d5492a 195 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830 196 tcg_gen_movi_i32(tmp, excp);
1ce94f81 197 gen_helper_exception(cpu_env, tmp);
7d1b0095 198 tcg_temp_free_i32(tmp);
d9ba4830
PB
199}
200
39d5492a 201static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 202{
39d5492a
PM
203 TCGv_i32 tmp1 = tcg_temp_new_i32();
204 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
205 tcg_gen_ext16s_i32(tmp1, a);
206 tcg_gen_ext16s_i32(tmp2, b);
3670669c 207 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 208 tcg_temp_free_i32(tmp2);
3670669c
PB
209 tcg_gen_sari_i32(a, a, 16);
210 tcg_gen_sari_i32(b, b, 16);
211 tcg_gen_mul_i32(b, b, a);
212 tcg_gen_mov_i32(a, tmp1);
7d1b0095 213 tcg_temp_free_i32(tmp1);
3670669c
PB
214}
215
216/* Byteswap each halfword. */
39d5492a 217static void gen_rev16(TCGv_i32 var)
3670669c 218{
39d5492a 219 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
220 tcg_gen_shri_i32(tmp, var, 8);
221 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
222 tcg_gen_shli_i32(var, var, 8);
223 tcg_gen_andi_i32(var, var, 0xff00ff00);
224 tcg_gen_or_i32(var, var, tmp);
7d1b0095 225 tcg_temp_free_i32(tmp);
3670669c
PB
226}
227
228/* Byteswap low halfword and sign extend. */
39d5492a 229static void gen_revsh(TCGv_i32 var)
3670669c 230{
1a855029
AJ
231 tcg_gen_ext16u_i32(var, var);
232 tcg_gen_bswap16_i32(var, var);
233 tcg_gen_ext16s_i32(var, var);
3670669c
PB
234}
235
236/* Unsigned bitfield extract. */
39d5492a 237static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
238{
239 if (shift)
240 tcg_gen_shri_i32(var, var, shift);
241 tcg_gen_andi_i32(var, var, mask);
242}
243
244/* Signed bitfield extract. */
39d5492a 245static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
246{
247 uint32_t signbit;
248
249 if (shift)
250 tcg_gen_sari_i32(var, var, shift);
251 if (shift + width < 32) {
252 signbit = 1u << (width - 1);
253 tcg_gen_andi_i32(var, var, (1u << width) - 1);
254 tcg_gen_xori_i32(var, var, signbit);
255 tcg_gen_subi_i32(var, var, signbit);
256 }
257}
258
838fa72d 259/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 260static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 261{
838fa72d
AJ
262 TCGv_i64 tmp64 = tcg_temp_new_i64();
263
264 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 265 tcg_temp_free_i32(b);
838fa72d
AJ
266 tcg_gen_shli_i64(tmp64, tmp64, 32);
267 tcg_gen_add_i64(a, tmp64, a);
268
269 tcg_temp_free_i64(tmp64);
270 return a;
271}
272
273/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 274static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
275{
276 TCGv_i64 tmp64 = tcg_temp_new_i64();
277
278 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 279 tcg_temp_free_i32(b);
838fa72d
AJ
280 tcg_gen_shli_i64(tmp64, tmp64, 32);
281 tcg_gen_sub_i64(a, tmp64, a);
282
283 tcg_temp_free_i64(tmp64);
284 return a;
3670669c
PB
285}
286
5e3f878a 287/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 288static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 289{
39d5492a
PM
290 TCGv_i32 lo = tcg_temp_new_i32();
291 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 292 TCGv_i64 ret;
5e3f878a 293
831d7fe8 294 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 295 tcg_temp_free_i32(a);
7d1b0095 296 tcg_temp_free_i32(b);
831d7fe8
RH
297
298 ret = tcg_temp_new_i64();
299 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
300 tcg_temp_free_i32(lo);
301 tcg_temp_free_i32(hi);
831d7fe8
RH
302
303 return ret;
5e3f878a
PB
304}
305
39d5492a 306static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 307{
39d5492a
PM
308 TCGv_i32 lo = tcg_temp_new_i32();
309 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 310 TCGv_i64 ret;
5e3f878a 311
831d7fe8 312 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 313 tcg_temp_free_i32(a);
7d1b0095 314 tcg_temp_free_i32(b);
831d7fe8
RH
315
316 ret = tcg_temp_new_i64();
317 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
318 tcg_temp_free_i32(lo);
319 tcg_temp_free_i32(hi);
831d7fe8
RH
320
321 return ret;
5e3f878a
PB
322}
323
8f01245e 324/* Swap low and high halfwords. */
39d5492a 325static void gen_swap_half(TCGv_i32 var)
8f01245e 326{
39d5492a 327 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
328 tcg_gen_shri_i32(tmp, var, 16);
329 tcg_gen_shli_i32(var, var, 16);
330 tcg_gen_or_i32(var, var, tmp);
7d1b0095 331 tcg_temp_free_i32(tmp);
8f01245e
PB
332}
333
b26eefb6
PB
334/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
335 tmp = (t0 ^ t1) & 0x8000;
336 t0 &= ~0x8000;
337 t1 &= ~0x8000;
338 t0 = (t0 + t1) ^ tmp;
339 */
340
39d5492a 341static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 342{
39d5492a 343 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
344 tcg_gen_xor_i32(tmp, t0, t1);
345 tcg_gen_andi_i32(tmp, tmp, 0x8000);
346 tcg_gen_andi_i32(t0, t0, ~0x8000);
347 tcg_gen_andi_i32(t1, t1, ~0x8000);
348 tcg_gen_add_i32(t0, t0, t1);
349 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
350 tcg_temp_free_i32(tmp);
351 tcg_temp_free_i32(t1);
b26eefb6
PB
352}
353
354/* Set CF to the top bit of var. */
39d5492a 355static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 356{
66c374de 357 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
358}
359
360/* Set N and Z flags from var. */
39d5492a 361static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 362{
66c374de
AJ
363 tcg_gen_mov_i32(cpu_NF, var);
364 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
365}
366
367/* T0 += T1 + CF. */
39d5492a 368static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 369{
396e467c 370 tcg_gen_add_i32(t0, t0, t1);
66c374de 371 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
372}
373
e9bb4aa9 374/* dest = T0 + T1 + CF. */
39d5492a 375static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 376{
e9bb4aa9 377 tcg_gen_add_i32(dest, t0, t1);
66c374de 378 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
379}
380
3670669c 381/* dest = T0 - T1 + CF - 1. */
39d5492a 382static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 383{
3670669c 384 tcg_gen_sub_i32(dest, t0, t1);
66c374de 385 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 386 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
387}
388
72485ec4 389/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 390static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 391{
39d5492a 392 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
393 tcg_gen_movi_i32(tmp, 0);
394 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 395 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 396 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
397 tcg_gen_xor_i32(tmp, t0, t1);
398 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
399 tcg_temp_free_i32(tmp);
400 tcg_gen_mov_i32(dest, cpu_NF);
401}
402
49b4c31e 403/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 404static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 405{
39d5492a 406 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
407 if (TCG_TARGET_HAS_add2_i32) {
408 tcg_gen_movi_i32(tmp, 0);
409 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 410 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
411 } else {
412 TCGv_i64 q0 = tcg_temp_new_i64();
413 TCGv_i64 q1 = tcg_temp_new_i64();
414 tcg_gen_extu_i32_i64(q0, t0);
415 tcg_gen_extu_i32_i64(q1, t1);
416 tcg_gen_add_i64(q0, q0, q1);
417 tcg_gen_extu_i32_i64(q1, cpu_CF);
418 tcg_gen_add_i64(q0, q0, q1);
419 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
420 tcg_temp_free_i64(q0);
421 tcg_temp_free_i64(q1);
422 }
423 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
424 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
425 tcg_gen_xor_i32(tmp, t0, t1);
426 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
427 tcg_temp_free_i32(tmp);
428 tcg_gen_mov_i32(dest, cpu_NF);
429}
430
72485ec4 431/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 432static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 433{
39d5492a 434 TCGv_i32 tmp;
72485ec4
AJ
435 tcg_gen_sub_i32(cpu_NF, t0, t1);
436 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
437 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
438 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
439 tmp = tcg_temp_new_i32();
440 tcg_gen_xor_i32(tmp, t0, t1);
441 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
442 tcg_temp_free_i32(tmp);
443 tcg_gen_mov_i32(dest, cpu_NF);
444}
445
e77f0832 446/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 447static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 448{
39d5492a 449 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
450 tcg_gen_not_i32(tmp, t1);
451 gen_adc_CC(dest, t0, tmp);
39d5492a 452 tcg_temp_free_i32(tmp);
2de68a49
RH
453}
454
365af80e 455#define GEN_SHIFT(name) \
39d5492a 456static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 457{ \
39d5492a 458 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
459 tmp1 = tcg_temp_new_i32(); \
460 tcg_gen_andi_i32(tmp1, t1, 0xff); \
461 tmp2 = tcg_const_i32(0); \
462 tmp3 = tcg_const_i32(0x1f); \
463 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
464 tcg_temp_free_i32(tmp3); \
465 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
466 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
467 tcg_temp_free_i32(tmp2); \
468 tcg_temp_free_i32(tmp1); \
469}
470GEN_SHIFT(shl)
471GEN_SHIFT(shr)
472#undef GEN_SHIFT
473
39d5492a 474static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 475{
39d5492a 476 TCGv_i32 tmp1, tmp2;
365af80e
AJ
477 tmp1 = tcg_temp_new_i32();
478 tcg_gen_andi_i32(tmp1, t1, 0xff);
479 tmp2 = tcg_const_i32(0x1f);
480 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
481 tcg_temp_free_i32(tmp2);
482 tcg_gen_sar_i32(dest, t0, tmp1);
483 tcg_temp_free_i32(tmp1);
484}
485
39d5492a 486static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 487{
39d5492a
PM
488 TCGv_i32 c0 = tcg_const_i32(0);
489 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
490 tcg_gen_neg_i32(tmp, src);
491 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
492 tcg_temp_free_i32(c0);
493 tcg_temp_free_i32(tmp);
494}
ad69471c 495
39d5492a 496static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 497{
9a119ff6 498 if (shift == 0) {
66c374de 499 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 500 } else {
66c374de
AJ
501 tcg_gen_shri_i32(cpu_CF, var, shift);
502 if (shift != 31) {
503 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
504 }
9a119ff6 505 }
9a119ff6 506}
b26eefb6 507
9a119ff6 508/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
509static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
510 int shift, int flags)
9a119ff6
PB
511{
512 switch (shiftop) {
513 case 0: /* LSL */
514 if (shift != 0) {
515 if (flags)
516 shifter_out_im(var, 32 - shift);
517 tcg_gen_shli_i32(var, var, shift);
518 }
519 break;
520 case 1: /* LSR */
521 if (shift == 0) {
522 if (flags) {
66c374de 523 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
524 }
525 tcg_gen_movi_i32(var, 0);
526 } else {
527 if (flags)
528 shifter_out_im(var, shift - 1);
529 tcg_gen_shri_i32(var, var, shift);
530 }
531 break;
532 case 2: /* ASR */
533 if (shift == 0)
534 shift = 32;
535 if (flags)
536 shifter_out_im(var, shift - 1);
537 if (shift == 32)
538 shift = 31;
539 tcg_gen_sari_i32(var, var, shift);
540 break;
541 case 3: /* ROR/RRX */
542 if (shift != 0) {
543 if (flags)
544 shifter_out_im(var, shift - 1);
f669df27 545 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 546 } else {
39d5492a 547 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 548 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
549 if (flags)
550 shifter_out_im(var, 0);
551 tcg_gen_shri_i32(var, var, 1);
b26eefb6 552 tcg_gen_or_i32(var, var, tmp);
7d1b0095 553 tcg_temp_free_i32(tmp);
b26eefb6
PB
554 }
555 }
556};
557
39d5492a
PM
558static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
559 TCGv_i32 shift, int flags)
8984bd2e
PB
560{
561 if (flags) {
562 switch (shiftop) {
9ef39277
BS
563 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
564 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
565 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
566 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
567 }
568 } else {
569 switch (shiftop) {
365af80e
AJ
570 case 0:
571 gen_shl(var, var, shift);
572 break;
573 case 1:
574 gen_shr(var, var, shift);
575 break;
576 case 2:
577 gen_sar(var, var, shift);
578 break;
f669df27
AJ
579 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
580 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
581 }
582 }
7d1b0095 583 tcg_temp_free_i32(shift);
8984bd2e
PB
584}
585
6ddbc6e4
PB
586#define PAS_OP(pfx) \
587 switch (op2) { \
588 case 0: gen_pas_helper(glue(pfx,add16)); break; \
589 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
590 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
591 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
592 case 4: gen_pas_helper(glue(pfx,add8)); break; \
593 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
594 }
39d5492a 595static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 596{
a7812ae4 597 TCGv_ptr tmp;
6ddbc6e4
PB
598
599 switch (op1) {
600#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
601 case 1:
a7812ae4 602 tmp = tcg_temp_new_ptr();
0ecb72a5 603 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 604 PAS_OP(s)
b75263d6 605 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
606 break;
607 case 5:
a7812ae4 608 tmp = tcg_temp_new_ptr();
0ecb72a5 609 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 610 PAS_OP(u)
b75263d6 611 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
612 break;
613#undef gen_pas_helper
614#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
615 case 2:
616 PAS_OP(q);
617 break;
618 case 3:
619 PAS_OP(sh);
620 break;
621 case 6:
622 PAS_OP(uq);
623 break;
624 case 7:
625 PAS_OP(uh);
626 break;
627#undef gen_pas_helper
628 }
629}
9ee6e8bb
PB
630#undef PAS_OP
631
6ddbc6e4
PB
632/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
633#define PAS_OP(pfx) \
ed89a2f1 634 switch (op1) { \
6ddbc6e4
PB
635 case 0: gen_pas_helper(glue(pfx,add8)); break; \
636 case 1: gen_pas_helper(glue(pfx,add16)); break; \
637 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
638 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
639 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
640 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
641 }
39d5492a 642static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 643{
a7812ae4 644 TCGv_ptr tmp;
6ddbc6e4 645
ed89a2f1 646 switch (op2) {
6ddbc6e4
PB
647#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
648 case 0:
a7812ae4 649 tmp = tcg_temp_new_ptr();
0ecb72a5 650 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 651 PAS_OP(s)
b75263d6 652 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
653 break;
654 case 4:
a7812ae4 655 tmp = tcg_temp_new_ptr();
0ecb72a5 656 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 657 PAS_OP(u)
b75263d6 658 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
659 break;
660#undef gen_pas_helper
661#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
662 case 1:
663 PAS_OP(q);
664 break;
665 case 2:
666 PAS_OP(sh);
667 break;
668 case 5:
669 PAS_OP(uq);
670 break;
671 case 6:
672 PAS_OP(uh);
673 break;
674#undef gen_pas_helper
675 }
676}
9ee6e8bb
PB
677#undef PAS_OP
678
d9ba4830
PB
679static void gen_test_cc(int cc, int label)
680{
39d5492a 681 TCGv_i32 tmp;
d9ba4830
PB
682 int inv;
683
d9ba4830
PB
684 switch (cc) {
685 case 0: /* eq: Z */
66c374de 686 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
687 break;
688 case 1: /* ne: !Z */
66c374de 689 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
690 break;
691 case 2: /* cs: C */
66c374de 692 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
693 break;
694 case 3: /* cc: !C */
66c374de 695 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
696 break;
697 case 4: /* mi: N */
66c374de 698 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
699 break;
700 case 5: /* pl: !N */
66c374de 701 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
702 break;
703 case 6: /* vs: V */
66c374de 704 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
705 break;
706 case 7: /* vc: !V */
66c374de 707 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
708 break;
709 case 8: /* hi: C && !Z */
710 inv = gen_new_label();
66c374de
AJ
711 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
712 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
713 gen_set_label(inv);
714 break;
715 case 9: /* ls: !C || Z */
66c374de
AJ
716 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
717 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
718 break;
719 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
720 tmp = tcg_temp_new_i32();
721 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 722 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 723 tcg_temp_free_i32(tmp);
d9ba4830
PB
724 break;
725 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
726 tmp = tcg_temp_new_i32();
727 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 728 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 729 tcg_temp_free_i32(tmp);
d9ba4830
PB
730 break;
731 case 12: /* gt: !Z && N == V */
732 inv = gen_new_label();
66c374de
AJ
733 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
734 tmp = tcg_temp_new_i32();
735 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 736 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 737 tcg_temp_free_i32(tmp);
d9ba4830
PB
738 gen_set_label(inv);
739 break;
740 case 13: /* le: Z || N != V */
66c374de
AJ
741 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
742 tmp = tcg_temp_new_i32();
743 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 744 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 745 tcg_temp_free_i32(tmp);
d9ba4830
PB
746 break;
747 default:
748 fprintf(stderr, "Bad condition code 0x%x\n", cc);
749 abort();
750 }
d9ba4830 751}
2c0262af 752
b1d8e52e 753static const uint8_t table_logic_cc[16] = {
2c0262af
FB
754 1, /* and */
755 1, /* xor */
756 0, /* sub */
757 0, /* rsb */
758 0, /* add */
759 0, /* adc */
760 0, /* sbc */
761 0, /* rsc */
762 1, /* andl */
763 1, /* xorl */
764 0, /* cmp */
765 0, /* cmn */
766 1, /* orr */
767 1, /* mov */
768 1, /* bic */
769 1, /* mvn */
770};
3b46e624 771
d9ba4830
PB
772/* Set PC and Thumb state from an immediate address. */
773static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 774{
39d5492a 775 TCGv_i32 tmp;
99c475ab 776
b26eefb6 777 s->is_jmp = DISAS_UPDATE;
d9ba4830 778 if (s->thumb != (addr & 1)) {
7d1b0095 779 tmp = tcg_temp_new_i32();
d9ba4830 780 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 781 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 782 tcg_temp_free_i32(tmp);
d9ba4830 783 }
155c3eac 784 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
785}
786
787/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 788static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 789{
d9ba4830 790 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
791 tcg_gen_andi_i32(cpu_R[15], var, ~1);
792 tcg_gen_andi_i32(var, var, 1);
793 store_cpu_field(var, thumb);
d9ba4830
PB
794}
795
21aeb343
JR
796/* Variant of store_reg which uses branch&exchange logic when storing
797 to r15 in ARM architecture v7 and above. The source must be a temporary
798 and will be marked as dead. */
0ecb72a5 799static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 800 int reg, TCGv_i32 var)
21aeb343
JR
801{
802 if (reg == 15 && ENABLE_ARCH_7) {
803 gen_bx(s, var);
804 } else {
805 store_reg(s, reg, var);
806 }
807}
808
be5e7a76
DES
809/* Variant of store_reg which uses branch&exchange logic when storing
810 * to r15 in ARM architecture v5T and above. This is used for storing
811 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
812 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 813static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 814 int reg, TCGv_i32 var)
be5e7a76
DES
815{
816 if (reg == 15 && ENABLE_ARCH_5) {
817 gen_bx(s, var);
818 } else {
819 store_reg(s, reg, var);
820 }
821}
822
08307563
PM
823/* Abstractions of "generate code to do a guest load/store for
824 * AArch32", where a vaddr is always 32 bits (and is zero
825 * extended if we're a 64 bit core) and data is also
826 * 32 bits unless specifically doing a 64 bit access.
827 * These functions work like tcg_gen_qemu_{ld,st}* except
828 * that their arguments are TCGv_i32 rather than TCGv.
829 */
830#if TARGET_LONG_BITS == 32
831
832#define DO_GEN_LD(OP) \
833static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
834{ \
835 tcg_gen_qemu_##OP(val, addr, index); \
836}
837
838#define DO_GEN_ST(OP) \
839static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
840{ \
841 tcg_gen_qemu_##OP(val, addr, index); \
842}
843
844static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
845{
846 tcg_gen_qemu_ld64(val, addr, index);
847}
848
849static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
850{
851 tcg_gen_qemu_st64(val, addr, index);
852}
853
854#else
855
856#define DO_GEN_LD(OP) \
857static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
858{ \
859 TCGv addr64 = tcg_temp_new(); \
860 TCGv val64 = tcg_temp_new(); \
861 tcg_gen_extu_i32_i64(addr64, addr); \
862 tcg_gen_qemu_##OP(val64, addr64, index); \
863 tcg_temp_free(addr64); \
864 tcg_gen_trunc_i64_i32(val, val64); \
865 tcg_temp_free(val64); \
866}
867
868#define DO_GEN_ST(OP) \
869static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
870{ \
871 TCGv addr64 = tcg_temp_new(); \
872 TCGv val64 = tcg_temp_new(); \
873 tcg_gen_extu_i32_i64(addr64, addr); \
874 tcg_gen_extu_i32_i64(val64, val); \
875 tcg_gen_qemu_##OP(val64, addr64, index); \
876 tcg_temp_free(addr64); \
877 tcg_temp_free(val64); \
878}
879
880static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
881{
882 TCGv addr64 = tcg_temp_new();
883 tcg_gen_extu_i32_i64(addr64, addr);
884 tcg_gen_qemu_ld64(val, addr64, index);
885 tcg_temp_free(addr64);
886}
887
888static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
889{
890 TCGv addr64 = tcg_temp_new();
891 tcg_gen_extu_i32_i64(addr64, addr);
892 tcg_gen_qemu_st64(val, addr64, index);
893 tcg_temp_free(addr64);
894}
895
896#endif
897
898DO_GEN_LD(ld8s)
899DO_GEN_LD(ld8u)
900DO_GEN_LD(ld16s)
901DO_GEN_LD(ld16u)
902DO_GEN_LD(ld32u)
903DO_GEN_ST(st8)
904DO_GEN_ST(st16)
905DO_GEN_ST(st32)
906
eaed129d 907static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 908{
14ade10f
AG
909 if (s->aarch64) {
910 gen_a64_set_pc_im(val);
911 } else {
912 tcg_gen_movi_i32(cpu_R[15], val);
913 }
5e3f878a
PB
914}
915
b5ff1b31
FB
916/* Force a TB lookup after an instruction that changes the CPU state. */
917static inline void gen_lookup_tb(DisasContext *s)
918{
a6445c52 919 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
920 s->is_jmp = DISAS_UPDATE;
921}
922
b0109805 923static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 924 TCGv_i32 var)
2c0262af 925{
1e8d4eec 926 int val, rm, shift, shiftop;
39d5492a 927 TCGv_i32 offset;
2c0262af
FB
928
929 if (!(insn & (1 << 25))) {
930 /* immediate */
931 val = insn & 0xfff;
932 if (!(insn & (1 << 23)))
933 val = -val;
537730b9 934 if (val != 0)
b0109805 935 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
936 } else {
937 /* shift/register */
938 rm = (insn) & 0xf;
939 shift = (insn >> 7) & 0x1f;
1e8d4eec 940 shiftop = (insn >> 5) & 3;
b26eefb6 941 offset = load_reg(s, rm);
9a119ff6 942 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 943 if (!(insn & (1 << 23)))
b0109805 944 tcg_gen_sub_i32(var, var, offset);
2c0262af 945 else
b0109805 946 tcg_gen_add_i32(var, var, offset);
7d1b0095 947 tcg_temp_free_i32(offset);
2c0262af
FB
948 }
949}
950
191f9a93 951static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 952 int extra, TCGv_i32 var)
2c0262af
FB
953{
954 int val, rm;
39d5492a 955 TCGv_i32 offset;
3b46e624 956
2c0262af
FB
957 if (insn & (1 << 22)) {
958 /* immediate */
959 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
960 if (!(insn & (1 << 23)))
961 val = -val;
18acad92 962 val += extra;
537730b9 963 if (val != 0)
b0109805 964 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
965 } else {
966 /* register */
191f9a93 967 if (extra)
b0109805 968 tcg_gen_addi_i32(var, var, extra);
2c0262af 969 rm = (insn) & 0xf;
b26eefb6 970 offset = load_reg(s, rm);
2c0262af 971 if (!(insn & (1 << 23)))
b0109805 972 tcg_gen_sub_i32(var, var, offset);
2c0262af 973 else
b0109805 974 tcg_gen_add_i32(var, var, offset);
7d1b0095 975 tcg_temp_free_i32(offset);
2c0262af
FB
976 }
977}
978
5aaebd13
PM
979static TCGv_ptr get_fpstatus_ptr(int neon)
980{
981 TCGv_ptr statusptr = tcg_temp_new_ptr();
982 int offset;
983 if (neon) {
0ecb72a5 984 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 985 } else {
0ecb72a5 986 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
987 }
988 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
989 return statusptr;
990}
991
4373f3ce
PB
992#define VFP_OP2(name) \
993static inline void gen_vfp_##name(int dp) \
994{ \
ae1857ec
PM
995 TCGv_ptr fpst = get_fpstatus_ptr(0); \
996 if (dp) { \
997 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
998 } else { \
999 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1000 } \
1001 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1002}
1003
4373f3ce
PB
1004VFP_OP2(add)
1005VFP_OP2(sub)
1006VFP_OP2(mul)
1007VFP_OP2(div)
1008
1009#undef VFP_OP2
1010
605a6aed
PM
1011static inline void gen_vfp_F1_mul(int dp)
1012{
1013 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1014 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1015 if (dp) {
ae1857ec 1016 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1017 } else {
ae1857ec 1018 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1019 }
ae1857ec 1020 tcg_temp_free_ptr(fpst);
605a6aed
PM
1021}
1022
1023static inline void gen_vfp_F1_neg(int dp)
1024{
1025 /* Like gen_vfp_neg() but put result in F1 */
1026 if (dp) {
1027 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1028 } else {
1029 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1030 }
1031}
1032
4373f3ce
PB
1033static inline void gen_vfp_abs(int dp)
1034{
1035 if (dp)
1036 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1037 else
1038 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1039}
1040
1041static inline void gen_vfp_neg(int dp)
1042{
1043 if (dp)
1044 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1045 else
1046 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1047}
1048
1049static inline void gen_vfp_sqrt(int dp)
1050{
1051 if (dp)
1052 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1053 else
1054 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1055}
1056
1057static inline void gen_vfp_cmp(int dp)
1058{
1059 if (dp)
1060 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1061 else
1062 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1063}
1064
1065static inline void gen_vfp_cmpe(int dp)
1066{
1067 if (dp)
1068 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1069 else
1070 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1071}
1072
1073static inline void gen_vfp_F1_ld0(int dp)
1074{
1075 if (dp)
5b340b51 1076 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1077 else
5b340b51 1078 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1079}
1080
5500b06c
PM
1081#define VFP_GEN_ITOF(name) \
1082static inline void gen_vfp_##name(int dp, int neon) \
1083{ \
5aaebd13 1084 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1085 if (dp) { \
1086 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1087 } else { \
1088 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1089 } \
b7fa9214 1090 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1091}
1092
5500b06c
PM
1093VFP_GEN_ITOF(uito)
1094VFP_GEN_ITOF(sito)
1095#undef VFP_GEN_ITOF
4373f3ce 1096
5500b06c
PM
1097#define VFP_GEN_FTOI(name) \
1098static inline void gen_vfp_##name(int dp, int neon) \
1099{ \
5aaebd13 1100 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1101 if (dp) { \
1102 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1103 } else { \
1104 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1105 } \
b7fa9214 1106 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1107}
1108
5500b06c
PM
1109VFP_GEN_FTOI(toui)
1110VFP_GEN_FTOI(touiz)
1111VFP_GEN_FTOI(tosi)
1112VFP_GEN_FTOI(tosiz)
1113#undef VFP_GEN_FTOI
4373f3ce
PB
1114
1115#define VFP_GEN_FIX(name) \
5500b06c 1116static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1117{ \
39d5492a 1118 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1119 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1120 if (dp) { \
1121 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1122 } else { \
1123 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1124 } \
b75263d6 1125 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1126 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1127}
4373f3ce
PB
1128VFP_GEN_FIX(tosh)
1129VFP_GEN_FIX(tosl)
1130VFP_GEN_FIX(touh)
1131VFP_GEN_FIX(toul)
1132VFP_GEN_FIX(shto)
1133VFP_GEN_FIX(slto)
1134VFP_GEN_FIX(uhto)
1135VFP_GEN_FIX(ulto)
1136#undef VFP_GEN_FIX
9ee6e8bb 1137
39d5492a 1138static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1139{
08307563
PM
1140 if (dp) {
1141 gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
1142 } else {
1143 gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
1144 }
b5ff1b31
FB
1145}
1146
39d5492a 1147static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1148{
08307563
PM
1149 if (dp) {
1150 gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
1151 } else {
1152 gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
1153 }
b5ff1b31
FB
1154}
1155
8e96005d
FB
1156static inline long
1157vfp_reg_offset (int dp, int reg)
1158{
1159 if (dp)
1160 return offsetof(CPUARMState, vfp.regs[reg]);
1161 else if (reg & 1) {
1162 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1163 + offsetof(CPU_DoubleU, l.upper);
1164 } else {
1165 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1166 + offsetof(CPU_DoubleU, l.lower);
1167 }
1168}
9ee6e8bb
PB
1169
1170/* Return the offset of a 32-bit piece of a NEON register.
1171 zero is the least significant end of the register. */
1172static inline long
1173neon_reg_offset (int reg, int n)
1174{
1175 int sreg;
1176 sreg = reg * 2 + n;
1177 return vfp_reg_offset(0, sreg);
1178}
1179
39d5492a 1180static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1181{
39d5492a 1182 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1183 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1184 return tmp;
1185}
1186
39d5492a 1187static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1188{
1189 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1190 tcg_temp_free_i32(var);
8f8e3aa4
PB
1191}
1192
a7812ae4 1193static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1194{
1195 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1196}
1197
a7812ae4 1198static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1199{
1200 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1201}
1202
4373f3ce
PB
1203#define tcg_gen_ld_f32 tcg_gen_ld_i32
1204#define tcg_gen_ld_f64 tcg_gen_ld_i64
1205#define tcg_gen_st_f32 tcg_gen_st_i32
1206#define tcg_gen_st_f64 tcg_gen_st_i64
1207
b7bcbe95
FB
1208static inline void gen_mov_F0_vreg(int dp, int reg)
1209{
1210 if (dp)
4373f3ce 1211 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1212 else
4373f3ce 1213 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1214}
1215
1216static inline void gen_mov_F1_vreg(int dp, int reg)
1217{
1218 if (dp)
4373f3ce 1219 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1220 else
4373f3ce 1221 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1222}
1223
1224static inline void gen_mov_vreg_F0(int dp, int reg)
1225{
1226 if (dp)
4373f3ce 1227 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1228 else
4373f3ce 1229 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1230}
1231
18c9b560
AZ
1232#define ARM_CP_RW_BIT (1 << 20)
1233
a7812ae4 1234static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1235{
0ecb72a5 1236 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1237}
1238
a7812ae4 1239static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1240{
0ecb72a5 1241 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1242}
1243
39d5492a 1244static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1245{
39d5492a 1246 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1247 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1248 return var;
e677137d
PB
1249}
1250
39d5492a 1251static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1252{
0ecb72a5 1253 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1254 tcg_temp_free_i32(var);
e677137d
PB
1255}
1256
1257static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1258{
1259 iwmmxt_store_reg(cpu_M0, rn);
1260}
1261
1262static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1263{
1264 iwmmxt_load_reg(cpu_M0, rn);
1265}
1266
1267static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1268{
1269 iwmmxt_load_reg(cpu_V1, rn);
1270 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1271}
1272
1273static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1274{
1275 iwmmxt_load_reg(cpu_V1, rn);
1276 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1277}
1278
1279static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1280{
1281 iwmmxt_load_reg(cpu_V1, rn);
1282 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1283}
1284
1285#define IWMMXT_OP(name) \
1286static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1287{ \
1288 iwmmxt_load_reg(cpu_V1, rn); \
1289 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1290}
1291
477955bd
PM
1292#define IWMMXT_OP_ENV(name) \
1293static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1294{ \
1295 iwmmxt_load_reg(cpu_V1, rn); \
1296 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1297}
1298
1299#define IWMMXT_OP_ENV_SIZE(name) \
1300IWMMXT_OP_ENV(name##b) \
1301IWMMXT_OP_ENV(name##w) \
1302IWMMXT_OP_ENV(name##l)
e677137d 1303
477955bd 1304#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1305static inline void gen_op_iwmmxt_##name##_M0(void) \
1306{ \
477955bd 1307 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1308}
1309
1310IWMMXT_OP(maddsq)
1311IWMMXT_OP(madduq)
1312IWMMXT_OP(sadb)
1313IWMMXT_OP(sadw)
1314IWMMXT_OP(mulslw)
1315IWMMXT_OP(mulshw)
1316IWMMXT_OP(mululw)
1317IWMMXT_OP(muluhw)
1318IWMMXT_OP(macsw)
1319IWMMXT_OP(macuw)
1320
477955bd
PM
1321IWMMXT_OP_ENV_SIZE(unpackl)
1322IWMMXT_OP_ENV_SIZE(unpackh)
1323
1324IWMMXT_OP_ENV1(unpacklub)
1325IWMMXT_OP_ENV1(unpackluw)
1326IWMMXT_OP_ENV1(unpacklul)
1327IWMMXT_OP_ENV1(unpackhub)
1328IWMMXT_OP_ENV1(unpackhuw)
1329IWMMXT_OP_ENV1(unpackhul)
1330IWMMXT_OP_ENV1(unpacklsb)
1331IWMMXT_OP_ENV1(unpacklsw)
1332IWMMXT_OP_ENV1(unpacklsl)
1333IWMMXT_OP_ENV1(unpackhsb)
1334IWMMXT_OP_ENV1(unpackhsw)
1335IWMMXT_OP_ENV1(unpackhsl)
1336
1337IWMMXT_OP_ENV_SIZE(cmpeq)
1338IWMMXT_OP_ENV_SIZE(cmpgtu)
1339IWMMXT_OP_ENV_SIZE(cmpgts)
1340
1341IWMMXT_OP_ENV_SIZE(mins)
1342IWMMXT_OP_ENV_SIZE(minu)
1343IWMMXT_OP_ENV_SIZE(maxs)
1344IWMMXT_OP_ENV_SIZE(maxu)
1345
1346IWMMXT_OP_ENV_SIZE(subn)
1347IWMMXT_OP_ENV_SIZE(addn)
1348IWMMXT_OP_ENV_SIZE(subu)
1349IWMMXT_OP_ENV_SIZE(addu)
1350IWMMXT_OP_ENV_SIZE(subs)
1351IWMMXT_OP_ENV_SIZE(adds)
1352
1353IWMMXT_OP_ENV(avgb0)
1354IWMMXT_OP_ENV(avgb1)
1355IWMMXT_OP_ENV(avgw0)
1356IWMMXT_OP_ENV(avgw1)
e677137d
PB
1357
1358IWMMXT_OP(msadb)
1359
477955bd
PM
1360IWMMXT_OP_ENV(packuw)
1361IWMMXT_OP_ENV(packul)
1362IWMMXT_OP_ENV(packuq)
1363IWMMXT_OP_ENV(packsw)
1364IWMMXT_OP_ENV(packsl)
1365IWMMXT_OP_ENV(packsq)
e677137d 1366
e677137d
PB
1367static void gen_op_iwmmxt_set_mup(void)
1368{
39d5492a 1369 TCGv_i32 tmp;
e677137d
PB
1370 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1371 tcg_gen_ori_i32(tmp, tmp, 2);
1372 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1373}
1374
1375static void gen_op_iwmmxt_set_cup(void)
1376{
39d5492a 1377 TCGv_i32 tmp;
e677137d
PB
1378 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1379 tcg_gen_ori_i32(tmp, tmp, 1);
1380 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1381}
1382
1383static void gen_op_iwmmxt_setpsr_nz(void)
1384{
39d5492a 1385 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1386 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1387 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1388}
1389
1390static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1391{
1392 iwmmxt_load_reg(cpu_V1, rn);
86831435 1393 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1394 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1395}
1396
39d5492a
PM
1397static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1398 TCGv_i32 dest)
18c9b560
AZ
1399{
1400 int rd;
1401 uint32_t offset;
39d5492a 1402 TCGv_i32 tmp;
18c9b560
AZ
1403
1404 rd = (insn >> 16) & 0xf;
da6b5335 1405 tmp = load_reg(s, rd);
18c9b560
AZ
1406
1407 offset = (insn & 0xff) << ((insn >> 7) & 2);
1408 if (insn & (1 << 24)) {
1409 /* Pre indexed */
1410 if (insn & (1 << 23))
da6b5335 1411 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1412 else
da6b5335
FN
1413 tcg_gen_addi_i32(tmp, tmp, -offset);
1414 tcg_gen_mov_i32(dest, tmp);
18c9b560 1415 if (insn & (1 << 21))
da6b5335
FN
1416 store_reg(s, rd, tmp);
1417 else
7d1b0095 1418 tcg_temp_free_i32(tmp);
18c9b560
AZ
1419 } else if (insn & (1 << 21)) {
1420 /* Post indexed */
da6b5335 1421 tcg_gen_mov_i32(dest, tmp);
18c9b560 1422 if (insn & (1 << 23))
da6b5335 1423 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1424 else
da6b5335
FN
1425 tcg_gen_addi_i32(tmp, tmp, -offset);
1426 store_reg(s, rd, tmp);
18c9b560
AZ
1427 } else if (!(insn & (1 << 23)))
1428 return 1;
1429 return 0;
1430}
1431
39d5492a 1432static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1433{
1434 int rd = (insn >> 0) & 0xf;
39d5492a 1435 TCGv_i32 tmp;
18c9b560 1436
da6b5335
FN
1437 if (insn & (1 << 8)) {
1438 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1439 return 1;
da6b5335
FN
1440 } else {
1441 tmp = iwmmxt_load_creg(rd);
1442 }
1443 } else {
7d1b0095 1444 tmp = tcg_temp_new_i32();
da6b5335
FN
1445 iwmmxt_load_reg(cpu_V0, rd);
1446 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1447 }
1448 tcg_gen_andi_i32(tmp, tmp, mask);
1449 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1450 tcg_temp_free_i32(tmp);
18c9b560
AZ
1451 return 0;
1452}
1453
a1c7273b 1454/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1455 (ie. an undefined instruction). */
0ecb72a5 1456static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1457{
1458 int rd, wrd;
1459 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1460 TCGv_i32 addr;
1461 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1462
1463 if ((insn & 0x0e000e00) == 0x0c000000) {
1464 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1465 wrd = insn & 0xf;
1466 rdlo = (insn >> 12) & 0xf;
1467 rdhi = (insn >> 16) & 0xf;
1468 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1469 iwmmxt_load_reg(cpu_V0, wrd);
1470 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1471 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1472 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1473 } else { /* TMCRR */
da6b5335
FN
1474 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1475 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1476 gen_op_iwmmxt_set_mup();
1477 }
1478 return 0;
1479 }
1480
1481 wrd = (insn >> 12) & 0xf;
7d1b0095 1482 addr = tcg_temp_new_i32();
da6b5335 1483 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1484 tcg_temp_free_i32(addr);
18c9b560 1485 return 1;
da6b5335 1486 }
18c9b560
AZ
1487 if (insn & ARM_CP_RW_BIT) {
1488 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1489 tmp = tcg_temp_new_i32();
08307563 1490 gen_aa32_ld32u(tmp, addr, IS_USER(s));
da6b5335 1491 iwmmxt_store_creg(wrd, tmp);
18c9b560 1492 } else {
e677137d
PB
1493 i = 1;
1494 if (insn & (1 << 8)) {
1495 if (insn & (1 << 22)) { /* WLDRD */
08307563 1496 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1497 i = 0;
1498 } else { /* WLDRW wRd */
29531141 1499 tmp = tcg_temp_new_i32();
08307563 1500 gen_aa32_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1501 }
1502 } else {
29531141 1503 tmp = tcg_temp_new_i32();
e677137d 1504 if (insn & (1 << 22)) { /* WLDRH */
08307563 1505 gen_aa32_ld16u(tmp, addr, IS_USER(s));
e677137d 1506 } else { /* WLDRB */
08307563 1507 gen_aa32_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1508 }
1509 }
1510 if (i) {
1511 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1512 tcg_temp_free_i32(tmp);
e677137d 1513 }
18c9b560
AZ
1514 gen_op_iwmmxt_movq_wRn_M0(wrd);
1515 }
1516 } else {
1517 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1518 tmp = iwmmxt_load_creg(wrd);
08307563 1519 gen_aa32_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1520 } else {
1521 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1522 tmp = tcg_temp_new_i32();
e677137d
PB
1523 if (insn & (1 << 8)) {
1524 if (insn & (1 << 22)) { /* WSTRD */
08307563 1525 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1526 } else { /* WSTRW wRd */
1527 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1528 gen_aa32_st32(tmp, addr, IS_USER(s));
e677137d
PB
1529 }
1530 } else {
1531 if (insn & (1 << 22)) { /* WSTRH */
1532 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1533 gen_aa32_st16(tmp, addr, IS_USER(s));
e677137d
PB
1534 } else { /* WSTRB */
1535 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1536 gen_aa32_st8(tmp, addr, IS_USER(s));
e677137d
PB
1537 }
1538 }
18c9b560 1539 }
29531141 1540 tcg_temp_free_i32(tmp);
18c9b560 1541 }
7d1b0095 1542 tcg_temp_free_i32(addr);
18c9b560
AZ
1543 return 0;
1544 }
1545
1546 if ((insn & 0x0f000000) != 0x0e000000)
1547 return 1;
1548
1549 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1550 case 0x000: /* WOR */
1551 wrd = (insn >> 12) & 0xf;
1552 rd0 = (insn >> 0) & 0xf;
1553 rd1 = (insn >> 16) & 0xf;
1554 gen_op_iwmmxt_movq_M0_wRn(rd0);
1555 gen_op_iwmmxt_orq_M0_wRn(rd1);
1556 gen_op_iwmmxt_setpsr_nz();
1557 gen_op_iwmmxt_movq_wRn_M0(wrd);
1558 gen_op_iwmmxt_set_mup();
1559 gen_op_iwmmxt_set_cup();
1560 break;
1561 case 0x011: /* TMCR */
1562 if (insn & 0xf)
1563 return 1;
1564 rd = (insn >> 12) & 0xf;
1565 wrd = (insn >> 16) & 0xf;
1566 switch (wrd) {
1567 case ARM_IWMMXT_wCID:
1568 case ARM_IWMMXT_wCASF:
1569 break;
1570 case ARM_IWMMXT_wCon:
1571 gen_op_iwmmxt_set_cup();
1572 /* Fall through. */
1573 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1574 tmp = iwmmxt_load_creg(wrd);
1575 tmp2 = load_reg(s, rd);
f669df27 1576 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1577 tcg_temp_free_i32(tmp2);
da6b5335 1578 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1579 break;
1580 case ARM_IWMMXT_wCGR0:
1581 case ARM_IWMMXT_wCGR1:
1582 case ARM_IWMMXT_wCGR2:
1583 case ARM_IWMMXT_wCGR3:
1584 gen_op_iwmmxt_set_cup();
da6b5335
FN
1585 tmp = load_reg(s, rd);
1586 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1587 break;
1588 default:
1589 return 1;
1590 }
1591 break;
1592 case 0x100: /* WXOR */
1593 wrd = (insn >> 12) & 0xf;
1594 rd0 = (insn >> 0) & 0xf;
1595 rd1 = (insn >> 16) & 0xf;
1596 gen_op_iwmmxt_movq_M0_wRn(rd0);
1597 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1598 gen_op_iwmmxt_setpsr_nz();
1599 gen_op_iwmmxt_movq_wRn_M0(wrd);
1600 gen_op_iwmmxt_set_mup();
1601 gen_op_iwmmxt_set_cup();
1602 break;
1603 case 0x111: /* TMRC */
1604 if (insn & 0xf)
1605 return 1;
1606 rd = (insn >> 12) & 0xf;
1607 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1608 tmp = iwmmxt_load_creg(wrd);
1609 store_reg(s, rd, tmp);
18c9b560
AZ
1610 break;
1611 case 0x300: /* WANDN */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 0) & 0xf;
1614 rd1 = (insn >> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1616 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1617 gen_op_iwmmxt_andq_M0_wRn(rd1);
1618 gen_op_iwmmxt_setpsr_nz();
1619 gen_op_iwmmxt_movq_wRn_M0(wrd);
1620 gen_op_iwmmxt_set_mup();
1621 gen_op_iwmmxt_set_cup();
1622 break;
1623 case 0x200: /* WAND */
1624 wrd = (insn >> 12) & 0xf;
1625 rd0 = (insn >> 0) & 0xf;
1626 rd1 = (insn >> 16) & 0xf;
1627 gen_op_iwmmxt_movq_M0_wRn(rd0);
1628 gen_op_iwmmxt_andq_M0_wRn(rd1);
1629 gen_op_iwmmxt_setpsr_nz();
1630 gen_op_iwmmxt_movq_wRn_M0(wrd);
1631 gen_op_iwmmxt_set_mup();
1632 gen_op_iwmmxt_set_cup();
1633 break;
1634 case 0x810: case 0xa10: /* WMADD */
1635 wrd = (insn >> 12) & 0xf;
1636 rd0 = (insn >> 0) & 0xf;
1637 rd1 = (insn >> 16) & 0xf;
1638 gen_op_iwmmxt_movq_M0_wRn(rd0);
1639 if (insn & (1 << 21))
1640 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1641 else
1642 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1643 gen_op_iwmmxt_movq_wRn_M0(wrd);
1644 gen_op_iwmmxt_set_mup();
1645 break;
1646 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1647 wrd = (insn >> 12) & 0xf;
1648 rd0 = (insn >> 16) & 0xf;
1649 rd1 = (insn >> 0) & 0xf;
1650 gen_op_iwmmxt_movq_M0_wRn(rd0);
1651 switch ((insn >> 22) & 3) {
1652 case 0:
1653 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1654 break;
1655 case 1:
1656 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1657 break;
1658 case 2:
1659 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1660 break;
1661 case 3:
1662 return 1;
1663 }
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 gen_op_iwmmxt_set_cup();
1667 break;
1668 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1674 case 0:
1675 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1676 break;
1677 case 1:
1678 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1679 break;
1680 case 2:
1681 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1682 break;
1683 case 3:
1684 return 1;
1685 }
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1689 break;
1690 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 if (insn & (1 << 22))
1696 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1697 else
1698 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1699 if (!(insn & (1 << 20)))
1700 gen_op_iwmmxt_addl_M0_wRn(wrd);
1701 gen_op_iwmmxt_movq_wRn_M0(wrd);
1702 gen_op_iwmmxt_set_mup();
1703 break;
1704 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1705 wrd = (insn >> 12) & 0xf;
1706 rd0 = (insn >> 16) & 0xf;
1707 rd1 = (insn >> 0) & 0xf;
1708 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1709 if (insn & (1 << 21)) {
1710 if (insn & (1 << 20))
1711 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1712 else
1713 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1714 } else {
1715 if (insn & (1 << 20))
1716 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1717 else
1718 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1719 }
18c9b560
AZ
1720 gen_op_iwmmxt_movq_wRn_M0(wrd);
1721 gen_op_iwmmxt_set_mup();
1722 break;
1723 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
1728 if (insn & (1 << 21))
1729 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1730 else
1731 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1732 if (!(insn & (1 << 20))) {
e677137d
PB
1733 iwmmxt_load_reg(cpu_V1, wrd);
1734 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1735 }
1736 gen_op_iwmmxt_movq_wRn_M0(wrd);
1737 gen_op_iwmmxt_set_mup();
1738 break;
1739 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1740 wrd = (insn >> 12) & 0xf;
1741 rd0 = (insn >> 16) & 0xf;
1742 rd1 = (insn >> 0) & 0xf;
1743 gen_op_iwmmxt_movq_M0_wRn(rd0);
1744 switch ((insn >> 22) & 3) {
1745 case 0:
1746 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1747 break;
1748 case 1:
1749 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1750 break;
1751 case 2:
1752 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1753 break;
1754 case 3:
1755 return 1;
1756 }
1757 gen_op_iwmmxt_movq_wRn_M0(wrd);
1758 gen_op_iwmmxt_set_mup();
1759 gen_op_iwmmxt_set_cup();
1760 break;
1761 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1766 if (insn & (1 << 22)) {
1767 if (insn & (1 << 20))
1768 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1769 else
1770 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1771 } else {
1772 if (insn & (1 << 20))
1773 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1774 else
1775 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1776 }
18c9b560
AZ
1777 gen_op_iwmmxt_movq_wRn_M0(wrd);
1778 gen_op_iwmmxt_set_mup();
1779 gen_op_iwmmxt_set_cup();
1780 break;
1781 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1782 wrd = (insn >> 12) & 0xf;
1783 rd0 = (insn >> 16) & 0xf;
1784 rd1 = (insn >> 0) & 0xf;
1785 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1786 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1787 tcg_gen_andi_i32(tmp, tmp, 7);
1788 iwmmxt_load_reg(cpu_V1, rd1);
1789 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1790 tcg_temp_free_i32(tmp);
18c9b560
AZ
1791 gen_op_iwmmxt_movq_wRn_M0(wrd);
1792 gen_op_iwmmxt_set_mup();
1793 break;
1794 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1795 if (((insn >> 6) & 3) == 3)
1796 return 1;
18c9b560
AZ
1797 rd = (insn >> 12) & 0xf;
1798 wrd = (insn >> 16) & 0xf;
da6b5335 1799 tmp = load_reg(s, rd);
18c9b560
AZ
1800 gen_op_iwmmxt_movq_M0_wRn(wrd);
1801 switch ((insn >> 6) & 3) {
1802 case 0:
da6b5335
FN
1803 tmp2 = tcg_const_i32(0xff);
1804 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1805 break;
1806 case 1:
da6b5335
FN
1807 tmp2 = tcg_const_i32(0xffff);
1808 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1809 break;
1810 case 2:
da6b5335
FN
1811 tmp2 = tcg_const_i32(0xffffffff);
1812 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1813 break;
da6b5335 1814 default:
39d5492a
PM
1815 TCGV_UNUSED_I32(tmp2);
1816 TCGV_UNUSED_I32(tmp3);
18c9b560 1817 }
da6b5335 1818 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1819 tcg_temp_free_i32(tmp3);
1820 tcg_temp_free_i32(tmp2);
7d1b0095 1821 tcg_temp_free_i32(tmp);
18c9b560
AZ
1822 gen_op_iwmmxt_movq_wRn_M0(wrd);
1823 gen_op_iwmmxt_set_mup();
1824 break;
1825 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1826 rd = (insn >> 12) & 0xf;
1827 wrd = (insn >> 16) & 0xf;
da6b5335 1828 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1829 return 1;
1830 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1831 tmp = tcg_temp_new_i32();
18c9b560
AZ
1832 switch ((insn >> 22) & 3) {
1833 case 0:
da6b5335
FN
1834 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1835 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1836 if (insn & 8) {
1837 tcg_gen_ext8s_i32(tmp, tmp);
1838 } else {
1839 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1840 }
1841 break;
1842 case 1:
da6b5335
FN
1843 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1844 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1845 if (insn & 8) {
1846 tcg_gen_ext16s_i32(tmp, tmp);
1847 } else {
1848 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1849 }
1850 break;
1851 case 2:
da6b5335
FN
1852 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1853 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1854 break;
18c9b560 1855 }
da6b5335 1856 store_reg(s, rd, tmp);
18c9b560
AZ
1857 break;
1858 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1859 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1860 return 1;
da6b5335 1861 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1862 switch ((insn >> 22) & 3) {
1863 case 0:
da6b5335 1864 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1865 break;
1866 case 1:
da6b5335 1867 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1868 break;
1869 case 2:
da6b5335 1870 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1871 break;
18c9b560 1872 }
da6b5335
FN
1873 tcg_gen_shli_i32(tmp, tmp, 28);
1874 gen_set_nzcv(tmp);
7d1b0095 1875 tcg_temp_free_i32(tmp);
18c9b560
AZ
1876 break;
1877 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1878 if (((insn >> 6) & 3) == 3)
1879 return 1;
18c9b560
AZ
1880 rd = (insn >> 12) & 0xf;
1881 wrd = (insn >> 16) & 0xf;
da6b5335 1882 tmp = load_reg(s, rd);
18c9b560
AZ
1883 switch ((insn >> 6) & 3) {
1884 case 0:
da6b5335 1885 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1886 break;
1887 case 1:
da6b5335 1888 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1889 break;
1890 case 2:
da6b5335 1891 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1892 break;
18c9b560 1893 }
7d1b0095 1894 tcg_temp_free_i32(tmp);
18c9b560
AZ
1895 gen_op_iwmmxt_movq_wRn_M0(wrd);
1896 gen_op_iwmmxt_set_mup();
1897 break;
1898 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1899 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1900 return 1;
da6b5335 1901 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1902 tmp2 = tcg_temp_new_i32();
da6b5335 1903 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1904 switch ((insn >> 22) & 3) {
1905 case 0:
1906 for (i = 0; i < 7; i ++) {
da6b5335
FN
1907 tcg_gen_shli_i32(tmp2, tmp2, 4);
1908 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1909 }
1910 break;
1911 case 1:
1912 for (i = 0; i < 3; i ++) {
da6b5335
FN
1913 tcg_gen_shli_i32(tmp2, tmp2, 8);
1914 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1915 }
1916 break;
1917 case 2:
da6b5335
FN
1918 tcg_gen_shli_i32(tmp2, tmp2, 16);
1919 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1920 break;
18c9b560 1921 }
da6b5335 1922 gen_set_nzcv(tmp);
7d1b0095
PM
1923 tcg_temp_free_i32(tmp2);
1924 tcg_temp_free_i32(tmp);
18c9b560
AZ
1925 break;
1926 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1927 wrd = (insn >> 12) & 0xf;
1928 rd0 = (insn >> 16) & 0xf;
1929 gen_op_iwmmxt_movq_M0_wRn(rd0);
1930 switch ((insn >> 22) & 3) {
1931 case 0:
e677137d 1932 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1933 break;
1934 case 1:
e677137d 1935 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1936 break;
1937 case 2:
e677137d 1938 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1939 break;
1940 case 3:
1941 return 1;
1942 }
1943 gen_op_iwmmxt_movq_wRn_M0(wrd);
1944 gen_op_iwmmxt_set_mup();
1945 break;
1946 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1947 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1948 return 1;
da6b5335 1949 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1950 tmp2 = tcg_temp_new_i32();
da6b5335 1951 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1952 switch ((insn >> 22) & 3) {
1953 case 0:
1954 for (i = 0; i < 7; i ++) {
da6b5335
FN
1955 tcg_gen_shli_i32(tmp2, tmp2, 4);
1956 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1957 }
1958 break;
1959 case 1:
1960 for (i = 0; i < 3; i ++) {
da6b5335
FN
1961 tcg_gen_shli_i32(tmp2, tmp2, 8);
1962 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1963 }
1964 break;
1965 case 2:
da6b5335
FN
1966 tcg_gen_shli_i32(tmp2, tmp2, 16);
1967 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1968 break;
18c9b560 1969 }
da6b5335 1970 gen_set_nzcv(tmp);
7d1b0095
PM
1971 tcg_temp_free_i32(tmp2);
1972 tcg_temp_free_i32(tmp);
18c9b560
AZ
1973 break;
1974 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1975 rd = (insn >> 12) & 0xf;
1976 rd0 = (insn >> 16) & 0xf;
da6b5335 1977 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1978 return 1;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1980 tmp = tcg_temp_new_i32();
18c9b560
AZ
1981 switch ((insn >> 22) & 3) {
1982 case 0:
da6b5335 1983 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1984 break;
1985 case 1:
da6b5335 1986 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1987 break;
1988 case 2:
da6b5335 1989 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1990 break;
18c9b560 1991 }
da6b5335 1992 store_reg(s, rd, tmp);
18c9b560
AZ
1993 break;
1994 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1995 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1996 wrd = (insn >> 12) & 0xf;
1997 rd0 = (insn >> 16) & 0xf;
1998 rd1 = (insn >> 0) & 0xf;
1999 gen_op_iwmmxt_movq_M0_wRn(rd0);
2000 switch ((insn >> 22) & 3) {
2001 case 0:
2002 if (insn & (1 << 21))
2003 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2004 else
2005 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2006 break;
2007 case 1:
2008 if (insn & (1 << 21))
2009 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2010 else
2011 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2012 break;
2013 case 2:
2014 if (insn & (1 << 21))
2015 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2016 else
2017 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2018 break;
2019 case 3:
2020 return 1;
2021 }
2022 gen_op_iwmmxt_movq_wRn_M0(wrd);
2023 gen_op_iwmmxt_set_mup();
2024 gen_op_iwmmxt_set_cup();
2025 break;
2026 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2027 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2028 wrd = (insn >> 12) & 0xf;
2029 rd0 = (insn >> 16) & 0xf;
2030 gen_op_iwmmxt_movq_M0_wRn(rd0);
2031 switch ((insn >> 22) & 3) {
2032 case 0:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_unpacklsb_M0();
2035 else
2036 gen_op_iwmmxt_unpacklub_M0();
2037 break;
2038 case 1:
2039 if (insn & (1 << 21))
2040 gen_op_iwmmxt_unpacklsw_M0();
2041 else
2042 gen_op_iwmmxt_unpackluw_M0();
2043 break;
2044 case 2:
2045 if (insn & (1 << 21))
2046 gen_op_iwmmxt_unpacklsl_M0();
2047 else
2048 gen_op_iwmmxt_unpacklul_M0();
2049 break;
2050 case 3:
2051 return 1;
2052 }
2053 gen_op_iwmmxt_movq_wRn_M0(wrd);
2054 gen_op_iwmmxt_set_mup();
2055 gen_op_iwmmxt_set_cup();
2056 break;
2057 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2058 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 switch ((insn >> 22) & 3) {
2063 case 0:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpackhsb_M0();
2066 else
2067 gen_op_iwmmxt_unpackhub_M0();
2068 break;
2069 case 1:
2070 if (insn & (1 << 21))
2071 gen_op_iwmmxt_unpackhsw_M0();
2072 else
2073 gen_op_iwmmxt_unpackhuw_M0();
2074 break;
2075 case 2:
2076 if (insn & (1 << 21))
2077 gen_op_iwmmxt_unpackhsl_M0();
2078 else
2079 gen_op_iwmmxt_unpackhul_M0();
2080 break;
2081 case 3:
2082 return 1;
2083 }
2084 gen_op_iwmmxt_movq_wRn_M0(wrd);
2085 gen_op_iwmmxt_set_mup();
2086 gen_op_iwmmxt_set_cup();
2087 break;
2088 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2089 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2090 if (((insn >> 22) & 3) == 0)
2091 return 1;
18c9b560
AZ
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2095 tmp = tcg_temp_new_i32();
da6b5335 2096 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2097 tcg_temp_free_i32(tmp);
18c9b560 2098 return 1;
da6b5335 2099 }
18c9b560 2100 switch ((insn >> 22) & 3) {
18c9b560 2101 case 1:
477955bd 2102 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2103 break;
2104 case 2:
477955bd 2105 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 case 3:
477955bd 2108 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2109 break;
2110 }
7d1b0095 2111 tcg_temp_free_i32(tmp);
18c9b560
AZ
2112 gen_op_iwmmxt_movq_wRn_M0(wrd);
2113 gen_op_iwmmxt_set_mup();
2114 gen_op_iwmmxt_set_cup();
2115 break;
2116 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2117 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2118 if (((insn >> 22) & 3) == 0)
2119 return 1;
18c9b560
AZ
2120 wrd = (insn >> 12) & 0xf;
2121 rd0 = (insn >> 16) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2123 tmp = tcg_temp_new_i32();
da6b5335 2124 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2125 tcg_temp_free_i32(tmp);
18c9b560 2126 return 1;
da6b5335 2127 }
18c9b560 2128 switch ((insn >> 22) & 3) {
18c9b560 2129 case 1:
477955bd 2130 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2131 break;
2132 case 2:
477955bd 2133 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2134 break;
2135 case 3:
477955bd 2136 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2137 break;
2138 }
7d1b0095 2139 tcg_temp_free_i32(tmp);
18c9b560
AZ
2140 gen_op_iwmmxt_movq_wRn_M0(wrd);
2141 gen_op_iwmmxt_set_mup();
2142 gen_op_iwmmxt_set_cup();
2143 break;
2144 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2145 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2146 if (((insn >> 22) & 3) == 0)
2147 return 1;
18c9b560
AZ
2148 wrd = (insn >> 12) & 0xf;
2149 rd0 = (insn >> 16) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2151 tmp = tcg_temp_new_i32();
da6b5335 2152 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2153 tcg_temp_free_i32(tmp);
18c9b560 2154 return 1;
da6b5335 2155 }
18c9b560 2156 switch ((insn >> 22) & 3) {
18c9b560 2157 case 1:
477955bd 2158 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2159 break;
2160 case 2:
477955bd 2161 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2162 break;
2163 case 3:
477955bd 2164 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2165 break;
2166 }
7d1b0095 2167 tcg_temp_free_i32(tmp);
18c9b560
AZ
2168 gen_op_iwmmxt_movq_wRn_M0(wrd);
2169 gen_op_iwmmxt_set_mup();
2170 gen_op_iwmmxt_set_cup();
2171 break;
2172 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2173 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2174 if (((insn >> 22) & 3) == 0)
2175 return 1;
18c9b560
AZ
2176 wrd = (insn >> 12) & 0xf;
2177 rd0 = (insn >> 16) & 0xf;
2178 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2179 tmp = tcg_temp_new_i32();
18c9b560 2180 switch ((insn >> 22) & 3) {
18c9b560 2181 case 1:
da6b5335 2182 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2183 tcg_temp_free_i32(tmp);
18c9b560 2184 return 1;
da6b5335 2185 }
477955bd 2186 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2187 break;
2188 case 2:
da6b5335 2189 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2190 tcg_temp_free_i32(tmp);
18c9b560 2191 return 1;
da6b5335 2192 }
477955bd 2193 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2194 break;
2195 case 3:
da6b5335 2196 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2197 tcg_temp_free_i32(tmp);
18c9b560 2198 return 1;
da6b5335 2199 }
477955bd 2200 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2201 break;
2202 }
7d1b0095 2203 tcg_temp_free_i32(tmp);
18c9b560
AZ
2204 gen_op_iwmmxt_movq_wRn_M0(wrd);
2205 gen_op_iwmmxt_set_mup();
2206 gen_op_iwmmxt_set_cup();
2207 break;
2208 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2209 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2210 wrd = (insn >> 12) & 0xf;
2211 rd0 = (insn >> 16) & 0xf;
2212 rd1 = (insn >> 0) & 0xf;
2213 gen_op_iwmmxt_movq_M0_wRn(rd0);
2214 switch ((insn >> 22) & 3) {
2215 case 0:
2216 if (insn & (1 << 21))
2217 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2218 else
2219 gen_op_iwmmxt_minub_M0_wRn(rd1);
2220 break;
2221 case 1:
2222 if (insn & (1 << 21))
2223 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2224 else
2225 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2226 break;
2227 case 2:
2228 if (insn & (1 << 21))
2229 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2230 else
2231 gen_op_iwmmxt_minul_M0_wRn(rd1);
2232 break;
2233 case 3:
2234 return 1;
2235 }
2236 gen_op_iwmmxt_movq_wRn_M0(wrd);
2237 gen_op_iwmmxt_set_mup();
2238 break;
2239 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2240 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2241 wrd = (insn >> 12) & 0xf;
2242 rd0 = (insn >> 16) & 0xf;
2243 rd1 = (insn >> 0) & 0xf;
2244 gen_op_iwmmxt_movq_M0_wRn(rd0);
2245 switch ((insn >> 22) & 3) {
2246 case 0:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2251 break;
2252 case 1:
2253 if (insn & (1 << 21))
2254 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2255 else
2256 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2257 break;
2258 case 2:
2259 if (insn & (1 << 21))
2260 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2261 else
2262 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2263 break;
2264 case 3:
2265 return 1;
2266 }
2267 gen_op_iwmmxt_movq_wRn_M0(wrd);
2268 gen_op_iwmmxt_set_mup();
2269 break;
2270 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2271 case 0x402: case 0x502: case 0x602: case 0x702:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2276 tmp = tcg_const_i32((insn >> 20) & 3);
2277 iwmmxt_load_reg(cpu_V1, rd1);
2278 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2279 tcg_temp_free_i32(tmp);
18c9b560
AZ
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 break;
2283 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2284 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2285 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2286 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2287 wrd = (insn >> 12) & 0xf;
2288 rd0 = (insn >> 16) & 0xf;
2289 rd1 = (insn >> 0) & 0xf;
2290 gen_op_iwmmxt_movq_M0_wRn(rd0);
2291 switch ((insn >> 20) & 0xf) {
2292 case 0x0:
2293 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2294 break;
2295 case 0x1:
2296 gen_op_iwmmxt_subub_M0_wRn(rd1);
2297 break;
2298 case 0x3:
2299 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2300 break;
2301 case 0x4:
2302 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2303 break;
2304 case 0x5:
2305 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2306 break;
2307 case 0x7:
2308 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2309 break;
2310 case 0x8:
2311 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2312 break;
2313 case 0x9:
2314 gen_op_iwmmxt_subul_M0_wRn(rd1);
2315 break;
2316 case 0xb:
2317 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2318 break;
2319 default:
2320 return 1;
2321 }
2322 gen_op_iwmmxt_movq_wRn_M0(wrd);
2323 gen_op_iwmmxt_set_mup();
2324 gen_op_iwmmxt_set_cup();
2325 break;
2326 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2327 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2328 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2329 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2330 wrd = (insn >> 12) & 0xf;
2331 rd0 = (insn >> 16) & 0xf;
2332 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2333 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2334 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2335 tcg_temp_free_i32(tmp);
18c9b560
AZ
2336 gen_op_iwmmxt_movq_wRn_M0(wrd);
2337 gen_op_iwmmxt_set_mup();
2338 gen_op_iwmmxt_set_cup();
2339 break;
2340 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2341 case 0x418: case 0x518: case 0x618: case 0x718:
2342 case 0x818: case 0x918: case 0xa18: case 0xb18:
2343 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2344 wrd = (insn >> 12) & 0xf;
2345 rd0 = (insn >> 16) & 0xf;
2346 rd1 = (insn >> 0) & 0xf;
2347 gen_op_iwmmxt_movq_M0_wRn(rd0);
2348 switch ((insn >> 20) & 0xf) {
2349 case 0x0:
2350 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2351 break;
2352 case 0x1:
2353 gen_op_iwmmxt_addub_M0_wRn(rd1);
2354 break;
2355 case 0x3:
2356 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2357 break;
2358 case 0x4:
2359 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2360 break;
2361 case 0x5:
2362 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2363 break;
2364 case 0x7:
2365 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2366 break;
2367 case 0x8:
2368 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2369 break;
2370 case 0x9:
2371 gen_op_iwmmxt_addul_M0_wRn(rd1);
2372 break;
2373 case 0xb:
2374 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2375 break;
2376 default:
2377 return 1;
2378 }
2379 gen_op_iwmmxt_movq_wRn_M0(wrd);
2380 gen_op_iwmmxt_set_mup();
2381 gen_op_iwmmxt_set_cup();
2382 break;
2383 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2384 case 0x408: case 0x508: case 0x608: case 0x708:
2385 case 0x808: case 0x908: case 0xa08: case 0xb08:
2386 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2387 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2388 return 1;
18c9b560
AZ
2389 wrd = (insn >> 12) & 0xf;
2390 rd0 = (insn >> 16) & 0xf;
2391 rd1 = (insn >> 0) & 0xf;
2392 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2393 switch ((insn >> 22) & 3) {
18c9b560
AZ
2394 case 1:
2395 if (insn & (1 << 21))
2396 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2397 else
2398 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2399 break;
2400 case 2:
2401 if (insn & (1 << 21))
2402 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2403 else
2404 gen_op_iwmmxt_packul_M0_wRn(rd1);
2405 break;
2406 case 3:
2407 if (insn & (1 << 21))
2408 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2409 else
2410 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2411 break;
2412 }
2413 gen_op_iwmmxt_movq_wRn_M0(wrd);
2414 gen_op_iwmmxt_set_mup();
2415 gen_op_iwmmxt_set_cup();
2416 break;
2417 case 0x201: case 0x203: case 0x205: case 0x207:
2418 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2419 case 0x211: case 0x213: case 0x215: case 0x217:
2420 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2421 wrd = (insn >> 5) & 0xf;
2422 rd0 = (insn >> 12) & 0xf;
2423 rd1 = (insn >> 0) & 0xf;
2424 if (rd0 == 0xf || rd1 == 0xf)
2425 return 1;
2426 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2427 tmp = load_reg(s, rd0);
2428 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2429 switch ((insn >> 16) & 0xf) {
2430 case 0x0: /* TMIA */
da6b5335 2431 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2432 break;
2433 case 0x8: /* TMIAPH */
da6b5335 2434 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2435 break;
2436 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2437 if (insn & (1 << 16))
da6b5335 2438 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2439 if (insn & (1 << 17))
da6b5335
FN
2440 tcg_gen_shri_i32(tmp2, tmp2, 16);
2441 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2442 break;
2443 default:
7d1b0095
PM
2444 tcg_temp_free_i32(tmp2);
2445 tcg_temp_free_i32(tmp);
18c9b560
AZ
2446 return 1;
2447 }
7d1b0095
PM
2448 tcg_temp_free_i32(tmp2);
2449 tcg_temp_free_i32(tmp);
18c9b560
AZ
2450 gen_op_iwmmxt_movq_wRn_M0(wrd);
2451 gen_op_iwmmxt_set_mup();
2452 break;
2453 default:
2454 return 1;
2455 }
2456
2457 return 0;
2458}
2459
a1c7273b 2460/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2461 (ie. an undefined instruction). */
0ecb72a5 2462static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2463{
2464 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2465 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2466
2467 if ((insn & 0x0ff00f10) == 0x0e200010) {
2468 /* Multiply with Internal Accumulate Format */
2469 rd0 = (insn >> 12) & 0xf;
2470 rd1 = insn & 0xf;
2471 acc = (insn >> 5) & 7;
2472
2473 if (acc != 0)
2474 return 1;
2475
3a554c0f
FN
2476 tmp = load_reg(s, rd0);
2477 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2478 switch ((insn >> 16) & 0xf) {
2479 case 0x0: /* MIA */
3a554c0f 2480 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2481 break;
2482 case 0x8: /* MIAPH */
3a554c0f 2483 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2484 break;
2485 case 0xc: /* MIABB */
2486 case 0xd: /* MIABT */
2487 case 0xe: /* MIATB */
2488 case 0xf: /* MIATT */
18c9b560 2489 if (insn & (1 << 16))
3a554c0f 2490 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2491 if (insn & (1 << 17))
3a554c0f
FN
2492 tcg_gen_shri_i32(tmp2, tmp2, 16);
2493 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2494 break;
2495 default:
2496 return 1;
2497 }
7d1b0095
PM
2498 tcg_temp_free_i32(tmp2);
2499 tcg_temp_free_i32(tmp);
18c9b560
AZ
2500
2501 gen_op_iwmmxt_movq_wRn_M0(acc);
2502 return 0;
2503 }
2504
2505 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2506 /* Internal Accumulator Access Format */
2507 rdhi = (insn >> 16) & 0xf;
2508 rdlo = (insn >> 12) & 0xf;
2509 acc = insn & 7;
2510
2511 if (acc != 0)
2512 return 1;
2513
2514 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2515 iwmmxt_load_reg(cpu_V0, acc);
2516 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2517 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2518 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2519 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2520 } else { /* MAR */
3a554c0f
FN
2521 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2522 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2523 }
2524 return 0;
2525 }
2526
2527 return 1;
2528}
2529
9ee6e8bb
PB
2530#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2531#define VFP_SREG(insn, bigbit, smallbit) \
2532 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2533#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2534 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2535 reg = (((insn) >> (bigbit)) & 0x0f) \
2536 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2537 } else { \
2538 if (insn & (1 << (smallbit))) \
2539 return 1; \
2540 reg = ((insn) >> (bigbit)) & 0x0f; \
2541 }} while (0)
2542
2543#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2544#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2545#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2546#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2547#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2548#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2549
4373f3ce 2550/* Move between integer and VFP cores. */
39d5492a 2551static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2552{
39d5492a 2553 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2554 tcg_gen_mov_i32(tmp, cpu_F0s);
2555 return tmp;
2556}
2557
39d5492a 2558static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2559{
2560 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2561 tcg_temp_free_i32(tmp);
4373f3ce
PB
2562}
2563
39d5492a 2564static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2565{
39d5492a 2566 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2567 if (shift)
2568 tcg_gen_shri_i32(var, var, shift);
86831435 2569 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2570 tcg_gen_shli_i32(tmp, var, 8);
2571 tcg_gen_or_i32(var, var, tmp);
2572 tcg_gen_shli_i32(tmp, var, 16);
2573 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2574 tcg_temp_free_i32(tmp);
ad69471c
PB
2575}
2576
39d5492a 2577static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2578{
39d5492a 2579 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2580 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2581 tcg_gen_shli_i32(tmp, var, 16);
2582 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2583 tcg_temp_free_i32(tmp);
ad69471c
PB
2584}
2585
39d5492a 2586static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2587{
39d5492a 2588 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2589 tcg_gen_andi_i32(var, var, 0xffff0000);
2590 tcg_gen_shri_i32(tmp, var, 16);
2591 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2592 tcg_temp_free_i32(tmp);
ad69471c
PB
2593}
2594
39d5492a 2595static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2596{
2597 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2598 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2599 switch (size) {
2600 case 0:
08307563 2601 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2602 gen_neon_dup_u8(tmp, 0);
2603 break;
2604 case 1:
08307563 2605 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2606 gen_neon_dup_low16(tmp);
2607 break;
2608 case 2:
08307563 2609 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2610 break;
2611 default: /* Avoid compiler warnings. */
2612 abort();
2613 }
2614 return tmp;
2615}
2616
a1c7273b 2617/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2618 (ie. an undefined instruction). */
0ecb72a5 2619static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2620{
2621 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2622 int dp, veclen;
39d5492a
PM
2623 TCGv_i32 addr;
2624 TCGv_i32 tmp;
2625 TCGv_i32 tmp2;
b7bcbe95 2626
40f137e1
PB
2627 if (!arm_feature(env, ARM_FEATURE_VFP))
2628 return 1;
2629
5df8bac1 2630 if (!s->vfp_enabled) {
9ee6e8bb 2631 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2632 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2633 return 1;
2634 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2635 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2636 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2637 return 1;
2638 }
b7bcbe95
FB
2639 dp = ((insn & 0xf00) == 0xb00);
2640 switch ((insn >> 24) & 0xf) {
2641 case 0xe:
2642 if (insn & (1 << 4)) {
2643 /* single register transfer */
b7bcbe95
FB
2644 rd = (insn >> 12) & 0xf;
2645 if (dp) {
9ee6e8bb
PB
2646 int size;
2647 int pass;
2648
2649 VFP_DREG_N(rn, insn);
2650 if (insn & 0xf)
b7bcbe95 2651 return 1;
9ee6e8bb
PB
2652 if (insn & 0x00c00060
2653 && !arm_feature(env, ARM_FEATURE_NEON))
2654 return 1;
2655
2656 pass = (insn >> 21) & 1;
2657 if (insn & (1 << 22)) {
2658 size = 0;
2659 offset = ((insn >> 5) & 3) * 8;
2660 } else if (insn & (1 << 5)) {
2661 size = 1;
2662 offset = (insn & (1 << 6)) ? 16 : 0;
2663 } else {
2664 size = 2;
2665 offset = 0;
2666 }
18c9b560 2667 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2668 /* vfp->arm */
ad69471c 2669 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2670 switch (size) {
2671 case 0:
9ee6e8bb 2672 if (offset)
ad69471c 2673 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2674 if (insn & (1 << 23))
ad69471c 2675 gen_uxtb(tmp);
9ee6e8bb 2676 else
ad69471c 2677 gen_sxtb(tmp);
9ee6e8bb
PB
2678 break;
2679 case 1:
9ee6e8bb
PB
2680 if (insn & (1 << 23)) {
2681 if (offset) {
ad69471c 2682 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2683 } else {
ad69471c 2684 gen_uxth(tmp);
9ee6e8bb
PB
2685 }
2686 } else {
2687 if (offset) {
ad69471c 2688 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2689 } else {
ad69471c 2690 gen_sxth(tmp);
9ee6e8bb
PB
2691 }
2692 }
2693 break;
2694 case 2:
9ee6e8bb
PB
2695 break;
2696 }
ad69471c 2697 store_reg(s, rd, tmp);
b7bcbe95
FB
2698 } else {
2699 /* arm->vfp */
ad69471c 2700 tmp = load_reg(s, rd);
9ee6e8bb
PB
2701 if (insn & (1 << 23)) {
2702 /* VDUP */
2703 if (size == 0) {
ad69471c 2704 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2705 } else if (size == 1) {
ad69471c 2706 gen_neon_dup_low16(tmp);
9ee6e8bb 2707 }
cbbccffc 2708 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2709 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2710 tcg_gen_mov_i32(tmp2, tmp);
2711 neon_store_reg(rn, n, tmp2);
2712 }
2713 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2714 } else {
2715 /* VMOV */
2716 switch (size) {
2717 case 0:
ad69471c 2718 tmp2 = neon_load_reg(rn, pass);
d593c48e 2719 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2720 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2721 break;
2722 case 1:
ad69471c 2723 tmp2 = neon_load_reg(rn, pass);
d593c48e 2724 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2725 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2726 break;
2727 case 2:
9ee6e8bb
PB
2728 break;
2729 }
ad69471c 2730 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2731 }
b7bcbe95 2732 }
9ee6e8bb
PB
2733 } else { /* !dp */
2734 if ((insn & 0x6f) != 0x00)
2735 return 1;
2736 rn = VFP_SREG_N(insn);
18c9b560 2737 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2738 /* vfp->arm */
2739 if (insn & (1 << 21)) {
2740 /* system register */
40f137e1 2741 rn >>= 1;
9ee6e8bb 2742
b7bcbe95 2743 switch (rn) {
40f137e1 2744 case ARM_VFP_FPSID:
4373f3ce 2745 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2746 VFP3 restricts all id registers to privileged
2747 accesses. */
2748 if (IS_USER(s)
2749 && arm_feature(env, ARM_FEATURE_VFP3))
2750 return 1;
4373f3ce 2751 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2752 break;
40f137e1 2753 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2754 if (IS_USER(s))
2755 return 1;
4373f3ce 2756 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2757 break;
40f137e1
PB
2758 case ARM_VFP_FPINST:
2759 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2760 /* Not present in VFP3. */
2761 if (IS_USER(s)
2762 || arm_feature(env, ARM_FEATURE_VFP3))
2763 return 1;
4373f3ce 2764 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2765 break;
40f137e1 2766 case ARM_VFP_FPSCR:
601d70b9 2767 if (rd == 15) {
4373f3ce
PB
2768 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2769 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2770 } else {
7d1b0095 2771 tmp = tcg_temp_new_i32();
4373f3ce
PB
2772 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2773 }
b7bcbe95 2774 break;
9ee6e8bb
PB
2775 case ARM_VFP_MVFR0:
2776 case ARM_VFP_MVFR1:
2777 if (IS_USER(s)
06ed5d66 2778 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2779 return 1;
4373f3ce 2780 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2781 break;
b7bcbe95
FB
2782 default:
2783 return 1;
2784 }
2785 } else {
2786 gen_mov_F0_vreg(0, rn);
4373f3ce 2787 tmp = gen_vfp_mrs();
b7bcbe95
FB
2788 }
2789 if (rd == 15) {
b5ff1b31 2790 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2791 gen_set_nzcv(tmp);
7d1b0095 2792 tcg_temp_free_i32(tmp);
4373f3ce
PB
2793 } else {
2794 store_reg(s, rd, tmp);
2795 }
b7bcbe95
FB
2796 } else {
2797 /* arm->vfp */
b7bcbe95 2798 if (insn & (1 << 21)) {
40f137e1 2799 rn >>= 1;
b7bcbe95
FB
2800 /* system register */
2801 switch (rn) {
40f137e1 2802 case ARM_VFP_FPSID:
9ee6e8bb
PB
2803 case ARM_VFP_MVFR0:
2804 case ARM_VFP_MVFR1:
b7bcbe95
FB
2805 /* Writes are ignored. */
2806 break;
40f137e1 2807 case ARM_VFP_FPSCR:
e4c1cfa5 2808 tmp = load_reg(s, rd);
4373f3ce 2809 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2810 tcg_temp_free_i32(tmp);
b5ff1b31 2811 gen_lookup_tb(s);
b7bcbe95 2812 break;
40f137e1 2813 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2814 if (IS_USER(s))
2815 return 1;
71b3c3de
JR
2816 /* TODO: VFP subarchitecture support.
2817 * For now, keep the EN bit only */
e4c1cfa5 2818 tmp = load_reg(s, rd);
71b3c3de 2819 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2820 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2821 gen_lookup_tb(s);
2822 break;
2823 case ARM_VFP_FPINST:
2824 case ARM_VFP_FPINST2:
e4c1cfa5 2825 tmp = load_reg(s, rd);
4373f3ce 2826 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2827 break;
b7bcbe95
FB
2828 default:
2829 return 1;
2830 }
2831 } else {
e4c1cfa5 2832 tmp = load_reg(s, rd);
4373f3ce 2833 gen_vfp_msr(tmp);
b7bcbe95
FB
2834 gen_mov_vreg_F0(0, rn);
2835 }
2836 }
2837 }
2838 } else {
2839 /* data processing */
2840 /* The opcode is in bits 23, 21, 20 and 6. */
2841 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2842 if (dp) {
2843 if (op == 15) {
2844 /* rn is opcode */
2845 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2846 } else {
2847 /* rn is register number */
9ee6e8bb 2848 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2849 }
2850
04595bf6 2851 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2852 /* Integer or single precision destination. */
9ee6e8bb 2853 rd = VFP_SREG_D(insn);
b7bcbe95 2854 } else {
9ee6e8bb 2855 VFP_DREG_D(rd, insn);
b7bcbe95 2856 }
04595bf6
PM
2857 if (op == 15 &&
2858 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2859 /* VCVT from int is always from S reg regardless of dp bit.
2860 * VCVT with immediate frac_bits has same format as SREG_M
2861 */
2862 rm = VFP_SREG_M(insn);
b7bcbe95 2863 } else {
9ee6e8bb 2864 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2865 }
2866 } else {
9ee6e8bb 2867 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2868 if (op == 15 && rn == 15) {
2869 /* Double precision destination. */
9ee6e8bb
PB
2870 VFP_DREG_D(rd, insn);
2871 } else {
2872 rd = VFP_SREG_D(insn);
2873 }
04595bf6
PM
2874 /* NB that we implicitly rely on the encoding for the frac_bits
2875 * in VCVT of fixed to float being the same as that of an SREG_M
2876 */
9ee6e8bb 2877 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2878 }
2879
69d1fc22 2880 veclen = s->vec_len;
b7bcbe95
FB
2881 if (op == 15 && rn > 3)
2882 veclen = 0;
2883
2884 /* Shut up compiler warnings. */
2885 delta_m = 0;
2886 delta_d = 0;
2887 bank_mask = 0;
3b46e624 2888
b7bcbe95
FB
2889 if (veclen > 0) {
2890 if (dp)
2891 bank_mask = 0xc;
2892 else
2893 bank_mask = 0x18;
2894
2895 /* Figure out what type of vector operation this is. */
2896 if ((rd & bank_mask) == 0) {
2897 /* scalar */
2898 veclen = 0;
2899 } else {
2900 if (dp)
69d1fc22 2901 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2902 else
69d1fc22 2903 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2904
2905 if ((rm & bank_mask) == 0) {
2906 /* mixed scalar/vector */
2907 delta_m = 0;
2908 } else {
2909 /* vector */
2910 delta_m = delta_d;
2911 }
2912 }
2913 }
2914
2915 /* Load the initial operands. */
2916 if (op == 15) {
2917 switch (rn) {
2918 case 16:
2919 case 17:
2920 /* Integer source */
2921 gen_mov_F0_vreg(0, rm);
2922 break;
2923 case 8:
2924 case 9:
2925 /* Compare */
2926 gen_mov_F0_vreg(dp, rd);
2927 gen_mov_F1_vreg(dp, rm);
2928 break;
2929 case 10:
2930 case 11:
2931 /* Compare with zero */
2932 gen_mov_F0_vreg(dp, rd);
2933 gen_vfp_F1_ld0(dp);
2934 break;
9ee6e8bb
PB
2935 case 20:
2936 case 21:
2937 case 22:
2938 case 23:
644ad806
PB
2939 case 28:
2940 case 29:
2941 case 30:
2942 case 31:
9ee6e8bb
PB
2943 /* Source and destination the same. */
2944 gen_mov_F0_vreg(dp, rd);
2945 break;
6e0c0ed1
PM
2946 case 4:
2947 case 5:
2948 case 6:
2949 case 7:
2950 /* VCVTB, VCVTT: only present with the halfprec extension,
2951 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2952 */
2953 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2954 return 1;
2955 }
2956 /* Otherwise fall through */
b7bcbe95
FB
2957 default:
2958 /* One source operand. */
2959 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2960 break;
b7bcbe95
FB
2961 }
2962 } else {
2963 /* Two source operands. */
2964 gen_mov_F0_vreg(dp, rn);
2965 gen_mov_F1_vreg(dp, rm);
2966 }
2967
2968 for (;;) {
2969 /* Perform the calculation. */
2970 switch (op) {
605a6aed
PM
2971 case 0: /* VMLA: fd + (fn * fm) */
2972 /* Note that order of inputs to the add matters for NaNs */
2973 gen_vfp_F1_mul(dp);
2974 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2975 gen_vfp_add(dp);
2976 break;
605a6aed 2977 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2978 gen_vfp_mul(dp);
605a6aed
PM
2979 gen_vfp_F1_neg(dp);
2980 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2981 gen_vfp_add(dp);
2982 break;
605a6aed
PM
2983 case 2: /* VNMLS: -fd + (fn * fm) */
2984 /* Note that it isn't valid to replace (-A + B) with (B - A)
2985 * or similar plausible looking simplifications
2986 * because this will give wrong results for NaNs.
2987 */
2988 gen_vfp_F1_mul(dp);
2989 gen_mov_F0_vreg(dp, rd);
2990 gen_vfp_neg(dp);
2991 gen_vfp_add(dp);
b7bcbe95 2992 break;
605a6aed 2993 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2994 gen_vfp_mul(dp);
605a6aed
PM
2995 gen_vfp_F1_neg(dp);
2996 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2997 gen_vfp_neg(dp);
605a6aed 2998 gen_vfp_add(dp);
b7bcbe95
FB
2999 break;
3000 case 4: /* mul: fn * fm */
3001 gen_vfp_mul(dp);
3002 break;
3003 case 5: /* nmul: -(fn * fm) */
3004 gen_vfp_mul(dp);
3005 gen_vfp_neg(dp);
3006 break;
3007 case 6: /* add: fn + fm */
3008 gen_vfp_add(dp);
3009 break;
3010 case 7: /* sub: fn - fm */
3011 gen_vfp_sub(dp);
3012 break;
3013 case 8: /* div: fn / fm */
3014 gen_vfp_div(dp);
3015 break;
da97f52c
PM
3016 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3017 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3018 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3019 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3020 /* These are fused multiply-add, and must be done as one
3021 * floating point operation with no rounding between the
3022 * multiplication and addition steps.
3023 * NB that doing the negations here as separate steps is
3024 * correct : an input NaN should come out with its sign bit
3025 * flipped if it is a negated-input.
3026 */
3027 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3028 return 1;
3029 }
3030 if (dp) {
3031 TCGv_ptr fpst;
3032 TCGv_i64 frd;
3033 if (op & 1) {
3034 /* VFNMS, VFMS */
3035 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3036 }
3037 frd = tcg_temp_new_i64();
3038 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3039 if (op & 2) {
3040 /* VFNMA, VFNMS */
3041 gen_helper_vfp_negd(frd, frd);
3042 }
3043 fpst = get_fpstatus_ptr(0);
3044 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3045 cpu_F1d, frd, fpst);
3046 tcg_temp_free_ptr(fpst);
3047 tcg_temp_free_i64(frd);
3048 } else {
3049 TCGv_ptr fpst;
3050 TCGv_i32 frd;
3051 if (op & 1) {
3052 /* VFNMS, VFMS */
3053 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3054 }
3055 frd = tcg_temp_new_i32();
3056 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3057 if (op & 2) {
3058 gen_helper_vfp_negs(frd, frd);
3059 }
3060 fpst = get_fpstatus_ptr(0);
3061 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3062 cpu_F1s, frd, fpst);
3063 tcg_temp_free_ptr(fpst);
3064 tcg_temp_free_i32(frd);
3065 }
3066 break;
9ee6e8bb
PB
3067 case 14: /* fconst */
3068 if (!arm_feature(env, ARM_FEATURE_VFP3))
3069 return 1;
3070
3071 n = (insn << 12) & 0x80000000;
3072 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3073 if (dp) {
3074 if (i & 0x40)
3075 i |= 0x3f80;
3076 else
3077 i |= 0x4000;
3078 n |= i << 16;
4373f3ce 3079 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3080 } else {
3081 if (i & 0x40)
3082 i |= 0x780;
3083 else
3084 i |= 0x800;
3085 n |= i << 19;
5b340b51 3086 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3087 }
9ee6e8bb 3088 break;
b7bcbe95
FB
3089 case 15: /* extension space */
3090 switch (rn) {
3091 case 0: /* cpy */
3092 /* no-op */
3093 break;
3094 case 1: /* abs */
3095 gen_vfp_abs(dp);
3096 break;
3097 case 2: /* neg */
3098 gen_vfp_neg(dp);
3099 break;
3100 case 3: /* sqrt */
3101 gen_vfp_sqrt(dp);
3102 break;
60011498 3103 case 4: /* vcvtb.f32.f16 */
60011498
PB
3104 tmp = gen_vfp_mrs();
3105 tcg_gen_ext16u_i32(tmp, tmp);
3106 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3107 tcg_temp_free_i32(tmp);
60011498
PB
3108 break;
3109 case 5: /* vcvtt.f32.f16 */
60011498
PB
3110 tmp = gen_vfp_mrs();
3111 tcg_gen_shri_i32(tmp, tmp, 16);
3112 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3113 tcg_temp_free_i32(tmp);
60011498
PB
3114 break;
3115 case 6: /* vcvtb.f16.f32 */
7d1b0095 3116 tmp = tcg_temp_new_i32();
60011498
PB
3117 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3118 gen_mov_F0_vreg(0, rd);
3119 tmp2 = gen_vfp_mrs();
3120 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3121 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3122 tcg_temp_free_i32(tmp2);
60011498
PB
3123 gen_vfp_msr(tmp);
3124 break;
3125 case 7: /* vcvtt.f16.f32 */
7d1b0095 3126 tmp = tcg_temp_new_i32();
60011498
PB
3127 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3128 tcg_gen_shli_i32(tmp, tmp, 16);
3129 gen_mov_F0_vreg(0, rd);
3130 tmp2 = gen_vfp_mrs();
3131 tcg_gen_ext16u_i32(tmp2, tmp2);
3132 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3133 tcg_temp_free_i32(tmp2);
60011498
PB
3134 gen_vfp_msr(tmp);
3135 break;
b7bcbe95
FB
3136 case 8: /* cmp */
3137 gen_vfp_cmp(dp);
3138 break;
3139 case 9: /* cmpe */
3140 gen_vfp_cmpe(dp);
3141 break;
3142 case 10: /* cmpz */
3143 gen_vfp_cmp(dp);
3144 break;
3145 case 11: /* cmpez */
3146 gen_vfp_F1_ld0(dp);
3147 gen_vfp_cmpe(dp);
3148 break;
3149 case 15: /* single<->double conversion */
3150 if (dp)
4373f3ce 3151 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3152 else
4373f3ce 3153 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3154 break;
3155 case 16: /* fuito */
5500b06c 3156 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3157 break;
3158 case 17: /* fsito */
5500b06c 3159 gen_vfp_sito(dp, 0);
b7bcbe95 3160 break;
9ee6e8bb
PB
3161 case 20: /* fshto */
3162 if (!arm_feature(env, ARM_FEATURE_VFP3))
3163 return 1;
5500b06c 3164 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3165 break;
3166 case 21: /* fslto */
3167 if (!arm_feature(env, ARM_FEATURE_VFP3))
3168 return 1;
5500b06c 3169 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3170 break;
3171 case 22: /* fuhto */
3172 if (!arm_feature(env, ARM_FEATURE_VFP3))
3173 return 1;
5500b06c 3174 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3175 break;
3176 case 23: /* fulto */
3177 if (!arm_feature(env, ARM_FEATURE_VFP3))
3178 return 1;
5500b06c 3179 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3180 break;
b7bcbe95 3181 case 24: /* ftoui */
5500b06c 3182 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3183 break;
3184 case 25: /* ftouiz */
5500b06c 3185 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3186 break;
3187 case 26: /* ftosi */
5500b06c 3188 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3189 break;
3190 case 27: /* ftosiz */
5500b06c 3191 gen_vfp_tosiz(dp, 0);
b7bcbe95 3192 break;
9ee6e8bb
PB
3193 case 28: /* ftosh */
3194 if (!arm_feature(env, ARM_FEATURE_VFP3))
3195 return 1;
5500b06c 3196 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3197 break;
3198 case 29: /* ftosl */
3199 if (!arm_feature(env, ARM_FEATURE_VFP3))
3200 return 1;
5500b06c 3201 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3202 break;
3203 case 30: /* ftouh */
3204 if (!arm_feature(env, ARM_FEATURE_VFP3))
3205 return 1;
5500b06c 3206 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3207 break;
3208 case 31: /* ftoul */
3209 if (!arm_feature(env, ARM_FEATURE_VFP3))
3210 return 1;
5500b06c 3211 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3212 break;
b7bcbe95 3213 default: /* undefined */
b7bcbe95
FB
3214 return 1;
3215 }
3216 break;
3217 default: /* undefined */
b7bcbe95
FB
3218 return 1;
3219 }
3220
3221 /* Write back the result. */
3222 if (op == 15 && (rn >= 8 && rn <= 11))
3223 ; /* Comparison, do nothing. */
04595bf6
PM
3224 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3225 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3226 gen_mov_vreg_F0(0, rd);
3227 else if (op == 15 && rn == 15)
3228 /* conversion */
3229 gen_mov_vreg_F0(!dp, rd);
3230 else
3231 gen_mov_vreg_F0(dp, rd);
3232
3233 /* break out of the loop if we have finished */
3234 if (veclen == 0)
3235 break;
3236
3237 if (op == 15 && delta_m == 0) {
3238 /* single source one-many */
3239 while (veclen--) {
3240 rd = ((rd + delta_d) & (bank_mask - 1))
3241 | (rd & bank_mask);
3242 gen_mov_vreg_F0(dp, rd);
3243 }
3244 break;
3245 }
3246 /* Setup the next operands. */
3247 veclen--;
3248 rd = ((rd + delta_d) & (bank_mask - 1))
3249 | (rd & bank_mask);
3250
3251 if (op == 15) {
3252 /* One source operand. */
3253 rm = ((rm + delta_m) & (bank_mask - 1))
3254 | (rm & bank_mask);
3255 gen_mov_F0_vreg(dp, rm);
3256 } else {
3257 /* Two source operands. */
3258 rn = ((rn + delta_d) & (bank_mask - 1))
3259 | (rn & bank_mask);
3260 gen_mov_F0_vreg(dp, rn);
3261 if (delta_m) {
3262 rm = ((rm + delta_m) & (bank_mask - 1))
3263 | (rm & bank_mask);
3264 gen_mov_F1_vreg(dp, rm);
3265 }
3266 }
3267 }
3268 }
3269 break;
3270 case 0xc:
3271 case 0xd:
8387da81 3272 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3273 /* two-register transfer */
3274 rn = (insn >> 16) & 0xf;
3275 rd = (insn >> 12) & 0xf;
3276 if (dp) {
9ee6e8bb
PB
3277 VFP_DREG_M(rm, insn);
3278 } else {
3279 rm = VFP_SREG_M(insn);
3280 }
b7bcbe95 3281
18c9b560 3282 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3283 /* vfp->arm */
3284 if (dp) {
4373f3ce
PB
3285 gen_mov_F0_vreg(0, rm * 2);
3286 tmp = gen_vfp_mrs();
3287 store_reg(s, rd, tmp);
3288 gen_mov_F0_vreg(0, rm * 2 + 1);
3289 tmp = gen_vfp_mrs();
3290 store_reg(s, rn, tmp);
b7bcbe95
FB
3291 } else {
3292 gen_mov_F0_vreg(0, rm);
4373f3ce 3293 tmp = gen_vfp_mrs();
8387da81 3294 store_reg(s, rd, tmp);
b7bcbe95 3295 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3296 tmp = gen_vfp_mrs();
8387da81 3297 store_reg(s, rn, tmp);
b7bcbe95
FB
3298 }
3299 } else {
3300 /* arm->vfp */
3301 if (dp) {
4373f3ce
PB
3302 tmp = load_reg(s, rd);
3303 gen_vfp_msr(tmp);
3304 gen_mov_vreg_F0(0, rm * 2);
3305 tmp = load_reg(s, rn);
3306 gen_vfp_msr(tmp);
3307 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3308 } else {
8387da81 3309 tmp = load_reg(s, rd);
4373f3ce 3310 gen_vfp_msr(tmp);
b7bcbe95 3311 gen_mov_vreg_F0(0, rm);
8387da81 3312 tmp = load_reg(s, rn);
4373f3ce 3313 gen_vfp_msr(tmp);
b7bcbe95
FB
3314 gen_mov_vreg_F0(0, rm + 1);
3315 }
3316 }
3317 } else {
3318 /* Load/store */
3319 rn = (insn >> 16) & 0xf;
3320 if (dp)
9ee6e8bb 3321 VFP_DREG_D(rd, insn);
b7bcbe95 3322 else
9ee6e8bb 3323 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3324 if ((insn & 0x01200000) == 0x01000000) {
3325 /* Single load/store */
3326 offset = (insn & 0xff) << 2;
3327 if ((insn & (1 << 23)) == 0)
3328 offset = -offset;
934814f1
PM
3329 if (s->thumb && rn == 15) {
3330 /* This is actually UNPREDICTABLE */
3331 addr = tcg_temp_new_i32();
3332 tcg_gen_movi_i32(addr, s->pc & ~2);
3333 } else {
3334 addr = load_reg(s, rn);
3335 }
312eea9f 3336 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3337 if (insn & (1 << 20)) {
312eea9f 3338 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3339 gen_mov_vreg_F0(dp, rd);
3340 } else {
3341 gen_mov_F0_vreg(dp, rd);
312eea9f 3342 gen_vfp_st(s, dp, addr);
b7bcbe95 3343 }
7d1b0095 3344 tcg_temp_free_i32(addr);
b7bcbe95
FB
3345 } else {
3346 /* load/store multiple */
934814f1 3347 int w = insn & (1 << 21);
b7bcbe95
FB
3348 if (dp)
3349 n = (insn >> 1) & 0x7f;
3350 else
3351 n = insn & 0xff;
3352
934814f1
PM
3353 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3354 /* P == U , W == 1 => UNDEF */
3355 return 1;
3356 }
3357 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3358 /* UNPREDICTABLE cases for bad immediates: we choose to
3359 * UNDEF to avoid generating huge numbers of TCG ops
3360 */
3361 return 1;
3362 }
3363 if (rn == 15 && w) {
3364 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3365 return 1;
3366 }
3367
3368 if (s->thumb && rn == 15) {
3369 /* This is actually UNPREDICTABLE */
3370 addr = tcg_temp_new_i32();
3371 tcg_gen_movi_i32(addr, s->pc & ~2);
3372 } else {
3373 addr = load_reg(s, rn);
3374 }
b7bcbe95 3375 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3376 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3377
3378 if (dp)
3379 offset = 8;
3380 else
3381 offset = 4;
3382 for (i = 0; i < n; i++) {
18c9b560 3383 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3384 /* load */
312eea9f 3385 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3386 gen_mov_vreg_F0(dp, rd + i);
3387 } else {
3388 /* store */
3389 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3390 gen_vfp_st(s, dp, addr);
b7bcbe95 3391 }
312eea9f 3392 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3393 }
934814f1 3394 if (w) {
b7bcbe95
FB
3395 /* writeback */
3396 if (insn & (1 << 24))
3397 offset = -offset * n;
3398 else if (dp && (insn & 1))
3399 offset = 4;
3400 else
3401 offset = 0;
3402
3403 if (offset != 0)
312eea9f
FN
3404 tcg_gen_addi_i32(addr, addr, offset);
3405 store_reg(s, rn, addr);
3406 } else {
7d1b0095 3407 tcg_temp_free_i32(addr);
b7bcbe95
FB
3408 }
3409 }
3410 }
3411 break;
3412 default:
3413 /* Should never happen. */
3414 return 1;
3415 }
3416 return 0;
3417}
3418
0a2461fa 3419static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3420{
6e256c93
FB
3421 TranslationBlock *tb;
3422
3423 tb = s->tb;
3424 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3425 tcg_gen_goto_tb(n);
eaed129d 3426 gen_set_pc_im(s, dest);
8cfd0495 3427 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3428 } else {
eaed129d 3429 gen_set_pc_im(s, dest);
57fec1fe 3430 tcg_gen_exit_tb(0);
6e256c93 3431 }
c53be334
FB
3432}
3433
8aaca4c0
FB
3434static inline void gen_jmp (DisasContext *s, uint32_t dest)
3435{
551bd27f 3436 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3437 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3438 if (s->thumb)
d9ba4830
PB
3439 dest |= 1;
3440 gen_bx_im(s, dest);
8aaca4c0 3441 } else {
6e256c93 3442 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3443 s->is_jmp = DISAS_TB_JUMP;
3444 }
3445}
3446
39d5492a 3447static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3448{
ee097184 3449 if (x)
d9ba4830 3450 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3451 else
d9ba4830 3452 gen_sxth(t0);
ee097184 3453 if (y)
d9ba4830 3454 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3455 else
d9ba4830
PB
3456 gen_sxth(t1);
3457 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3458}
3459
3460/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3461static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3462 uint32_t mask;
3463
3464 mask = 0;
3465 if (flags & (1 << 0))
3466 mask |= 0xff;
3467 if (flags & (1 << 1))
3468 mask |= 0xff00;
3469 if (flags & (1 << 2))
3470 mask |= 0xff0000;
3471 if (flags & (1 << 3))
3472 mask |= 0xff000000;
9ee6e8bb 3473
2ae23e75 3474 /* Mask out undefined bits. */
9ee6e8bb 3475 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3476 if (!arm_feature(env, ARM_FEATURE_V4T))
3477 mask &= ~CPSR_T;
3478 if (!arm_feature(env, ARM_FEATURE_V5))
3479 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3480 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3481 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3482 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3483 mask &= ~CPSR_IT;
9ee6e8bb 3484 /* Mask out execution state bits. */
2ae23e75 3485 if (!spsr)
e160c51c 3486 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3487 /* Mask out privileged bits. */
3488 if (IS_USER(s))
9ee6e8bb 3489 mask &= CPSR_USER;
b5ff1b31
FB
3490 return mask;
3491}
3492
2fbac54b 3493/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3494static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3495{
39d5492a 3496 TCGv_i32 tmp;
b5ff1b31
FB
3497 if (spsr) {
3498 /* ??? This is also undefined in system mode. */
3499 if (IS_USER(s))
3500 return 1;
d9ba4830
PB
3501
3502 tmp = load_cpu_field(spsr);
3503 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3504 tcg_gen_andi_i32(t0, t0, mask);
3505 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3506 store_cpu_field(tmp, spsr);
b5ff1b31 3507 } else {
2fbac54b 3508 gen_set_cpsr(t0, mask);
b5ff1b31 3509 }
7d1b0095 3510 tcg_temp_free_i32(t0);
b5ff1b31
FB
3511 gen_lookup_tb(s);
3512 return 0;
3513}
3514
2fbac54b
FN
3515/* Returns nonzero if access to the PSR is not permitted. */
3516static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3517{
39d5492a 3518 TCGv_i32 tmp;
7d1b0095 3519 tmp = tcg_temp_new_i32();
2fbac54b
FN
3520 tcg_gen_movi_i32(tmp, val);
3521 return gen_set_psr(s, mask, spsr, tmp);
3522}
3523
e9bb4aa9 3524/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3525static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3526{
39d5492a 3527 TCGv_i32 tmp;
e9bb4aa9 3528 store_reg(s, 15, pc);
d9ba4830
PB
3529 tmp = load_cpu_field(spsr);
3530 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3531 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3532 s->is_jmp = DISAS_UPDATE;
3533}
3534
b0109805 3535/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3536static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3537{
b0109805 3538 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3539 tcg_temp_free_i32(cpsr);
b0109805 3540 store_reg(s, 15, pc);
9ee6e8bb
PB
3541 s->is_jmp = DISAS_UPDATE;
3542}
3b46e624 3543
9ee6e8bb
PB
3544static inline void
3545gen_set_condexec (DisasContext *s)
3546{
3547 if (s->condexec_mask) {
8f01245e 3548 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3549 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3550 tcg_gen_movi_i32(tmp, val);
d9ba4830 3551 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3552 }
3553}
3b46e624 3554
bc4a0de0
PM
3555static void gen_exception_insn(DisasContext *s, int offset, int excp)
3556{
3557 gen_set_condexec(s);
eaed129d 3558 gen_set_pc_im(s, s->pc - offset);
bc4a0de0
PM
3559 gen_exception(excp);
3560 s->is_jmp = DISAS_JUMP;
3561}
3562
9ee6e8bb
PB
3563static void gen_nop_hint(DisasContext *s, int val)
3564{
3565 switch (val) {
3566 case 3: /* wfi */
eaed129d 3567 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3568 s->is_jmp = DISAS_WFI;
3569 break;
3570 case 2: /* wfe */
3571 case 4: /* sev */
12b10571
MR
3572 case 5: /* sevl */
3573 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3574 default: /* nop */
3575 break;
3576 }
3577}
99c475ab 3578
ad69471c 3579#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3580
39d5492a 3581static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3582{
3583 switch (size) {
dd8fbd78
FN
3584 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3585 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3586 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3587 default: abort();
9ee6e8bb 3588 }
9ee6e8bb
PB
3589}
3590
39d5492a 3591static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3592{
3593 switch (size) {
dd8fbd78
FN
3594 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3595 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3596 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3597 default: return;
3598 }
3599}
3600
3601/* 32-bit pairwise ops end up the same as the elementwise versions. */
3602#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3603#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3604#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3605#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3606
ad69471c
PB
3607#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3608 switch ((size << 1) | u) { \
3609 case 0: \
dd8fbd78 3610 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3611 break; \
3612 case 1: \
dd8fbd78 3613 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3614 break; \
3615 case 2: \
dd8fbd78 3616 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3617 break; \
3618 case 3: \
dd8fbd78 3619 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3620 break; \
3621 case 4: \
dd8fbd78 3622 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3623 break; \
3624 case 5: \
dd8fbd78 3625 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3626 break; \
3627 default: return 1; \
3628 }} while (0)
9ee6e8bb
PB
3629
3630#define GEN_NEON_INTEGER_OP(name) do { \
3631 switch ((size << 1) | u) { \
ad69471c 3632 case 0: \
dd8fbd78 3633 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3634 break; \
3635 case 1: \
dd8fbd78 3636 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3637 break; \
3638 case 2: \
dd8fbd78 3639 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3640 break; \
3641 case 3: \
dd8fbd78 3642 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3643 break; \
3644 case 4: \
dd8fbd78 3645 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3646 break; \
3647 case 5: \
dd8fbd78 3648 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3649 break; \
9ee6e8bb
PB
3650 default: return 1; \
3651 }} while (0)
3652
39d5492a 3653static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3654{
39d5492a 3655 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3656 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3657 return tmp;
9ee6e8bb
PB
3658}
3659
39d5492a 3660static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3661{
dd8fbd78 3662 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3663 tcg_temp_free_i32(var);
9ee6e8bb
PB
3664}
3665
39d5492a 3666static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3667{
39d5492a 3668 TCGv_i32 tmp;
9ee6e8bb 3669 if (size == 1) {
0fad6efc
PM
3670 tmp = neon_load_reg(reg & 7, reg >> 4);
3671 if (reg & 8) {
dd8fbd78 3672 gen_neon_dup_high16(tmp);
0fad6efc
PM
3673 } else {
3674 gen_neon_dup_low16(tmp);
dd8fbd78 3675 }
0fad6efc
PM
3676 } else {
3677 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3678 }
dd8fbd78 3679 return tmp;
9ee6e8bb
PB
3680}
3681
02acedf9 3682static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3683{
39d5492a 3684 TCGv_i32 tmp, tmp2;
600b828c 3685 if (!q && size == 2) {
02acedf9
PM
3686 return 1;
3687 }
3688 tmp = tcg_const_i32(rd);
3689 tmp2 = tcg_const_i32(rm);
3690 if (q) {
3691 switch (size) {
3692 case 0:
02da0b2d 3693 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3694 break;
3695 case 1:
02da0b2d 3696 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3697 break;
3698 case 2:
02da0b2d 3699 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3700 break;
3701 default:
3702 abort();
3703 }
3704 } else {
3705 switch (size) {
3706 case 0:
02da0b2d 3707 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3708 break;
3709 case 1:
02da0b2d 3710 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3711 break;
3712 default:
3713 abort();
3714 }
3715 }
3716 tcg_temp_free_i32(tmp);
3717 tcg_temp_free_i32(tmp2);
3718 return 0;
19457615
FN
3719}
3720
d68a6f3a 3721static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3722{
39d5492a 3723 TCGv_i32 tmp, tmp2;
600b828c 3724 if (!q && size == 2) {
d68a6f3a
PM
3725 return 1;
3726 }
3727 tmp = tcg_const_i32(rd);
3728 tmp2 = tcg_const_i32(rm);
3729 if (q) {
3730 switch (size) {
3731 case 0:
02da0b2d 3732 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3733 break;
3734 case 1:
02da0b2d 3735 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3736 break;
3737 case 2:
02da0b2d 3738 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3739 break;
3740 default:
3741 abort();
3742 }
3743 } else {
3744 switch (size) {
3745 case 0:
02da0b2d 3746 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3747 break;
3748 case 1:
02da0b2d 3749 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3750 break;
3751 default:
3752 abort();
3753 }
3754 }
3755 tcg_temp_free_i32(tmp);
3756 tcg_temp_free_i32(tmp2);
3757 return 0;
19457615
FN
3758}
3759
39d5492a 3760static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 3761{
39d5492a 3762 TCGv_i32 rd, tmp;
19457615 3763
7d1b0095
PM
3764 rd = tcg_temp_new_i32();
3765 tmp = tcg_temp_new_i32();
19457615
FN
3766
3767 tcg_gen_shli_i32(rd, t0, 8);
3768 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3769 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3770 tcg_gen_or_i32(rd, rd, tmp);
3771
3772 tcg_gen_shri_i32(t1, t1, 8);
3773 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3774 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3775 tcg_gen_or_i32(t1, t1, tmp);
3776 tcg_gen_mov_i32(t0, rd);
3777
7d1b0095
PM
3778 tcg_temp_free_i32(tmp);
3779 tcg_temp_free_i32(rd);
19457615
FN
3780}
3781
39d5492a 3782static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 3783{
39d5492a 3784 TCGv_i32 rd, tmp;
19457615 3785
7d1b0095
PM
3786 rd = tcg_temp_new_i32();
3787 tmp = tcg_temp_new_i32();
19457615
FN
3788
3789 tcg_gen_shli_i32(rd, t0, 16);
3790 tcg_gen_andi_i32(tmp, t1, 0xffff);
3791 tcg_gen_or_i32(rd, rd, tmp);
3792 tcg_gen_shri_i32(t1, t1, 16);
3793 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3794 tcg_gen_or_i32(t1, t1, tmp);
3795 tcg_gen_mov_i32(t0, rd);
3796
7d1b0095
PM
3797 tcg_temp_free_i32(tmp);
3798 tcg_temp_free_i32(rd);
19457615
FN
3799}
3800
3801
9ee6e8bb
PB
3802static struct {
3803 int nregs;
3804 int interleave;
3805 int spacing;
3806} neon_ls_element_type[11] = {
3807 {4, 4, 1},
3808 {4, 4, 2},
3809 {4, 1, 1},
3810 {4, 2, 1},
3811 {3, 3, 1},
3812 {3, 3, 2},
3813 {3, 1, 1},
3814 {1, 1, 1},
3815 {2, 2, 1},
3816 {2, 2, 2},
3817 {2, 1, 1}
3818};
3819
3820/* Translate a NEON load/store element instruction. Return nonzero if the
3821 instruction is invalid. */
0ecb72a5 3822static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3823{
3824 int rd, rn, rm;
3825 int op;
3826 int nregs;
3827 int interleave;
84496233 3828 int spacing;
9ee6e8bb
PB
3829 int stride;
3830 int size;
3831 int reg;
3832 int pass;
3833 int load;
3834 int shift;
9ee6e8bb 3835 int n;
39d5492a
PM
3836 TCGv_i32 addr;
3837 TCGv_i32 tmp;
3838 TCGv_i32 tmp2;
84496233 3839 TCGv_i64 tmp64;
9ee6e8bb 3840
5df8bac1 3841 if (!s->vfp_enabled)
9ee6e8bb
PB
3842 return 1;
3843 VFP_DREG_D(rd, insn);
3844 rn = (insn >> 16) & 0xf;
3845 rm = insn & 0xf;
3846 load = (insn & (1 << 21)) != 0;
3847 if ((insn & (1 << 23)) == 0) {
3848 /* Load store all elements. */
3849 op = (insn >> 8) & 0xf;
3850 size = (insn >> 6) & 3;
84496233 3851 if (op > 10)
9ee6e8bb 3852 return 1;
f2dd89d0
PM
3853 /* Catch UNDEF cases for bad values of align field */
3854 switch (op & 0xc) {
3855 case 4:
3856 if (((insn >> 5) & 1) == 1) {
3857 return 1;
3858 }
3859 break;
3860 case 8:
3861 if (((insn >> 4) & 3) == 3) {
3862 return 1;
3863 }
3864 break;
3865 default:
3866 break;
3867 }
9ee6e8bb
PB
3868 nregs = neon_ls_element_type[op].nregs;
3869 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3870 spacing = neon_ls_element_type[op].spacing;
3871 if (size == 3 && (interleave | spacing) != 1)
3872 return 1;
e318a60b 3873 addr = tcg_temp_new_i32();
dcc65026 3874 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3875 stride = (1 << size) * interleave;
3876 for (reg = 0; reg < nregs; reg++) {
3877 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3878 load_reg_var(s, addr, rn);
3879 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3880 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3881 load_reg_var(s, addr, rn);
3882 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3883 }
84496233 3884 if (size == 3) {
8ed1237d 3885 tmp64 = tcg_temp_new_i64();
84496233 3886 if (load) {
08307563 3887 gen_aa32_ld64(tmp64, addr, IS_USER(s));
84496233 3888 neon_store_reg64(tmp64, rd);
84496233 3889 } else {
84496233 3890 neon_load_reg64(tmp64, rd);
08307563 3891 gen_aa32_st64(tmp64, addr, IS_USER(s));
84496233 3892 }
8ed1237d 3893 tcg_temp_free_i64(tmp64);
84496233
JR
3894 tcg_gen_addi_i32(addr, addr, stride);
3895 } else {
3896 for (pass = 0; pass < 2; pass++) {
3897 if (size == 2) {
3898 if (load) {
58ab8e96 3899 tmp = tcg_temp_new_i32();
08307563 3900 gen_aa32_ld32u(tmp, addr, IS_USER(s));
84496233
JR
3901 neon_store_reg(rd, pass, tmp);
3902 } else {
3903 tmp = neon_load_reg(rd, pass);
08307563 3904 gen_aa32_st32(tmp, addr, IS_USER(s));
58ab8e96 3905 tcg_temp_free_i32(tmp);
84496233 3906 }
1b2b1e54 3907 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3908 } else if (size == 1) {
3909 if (load) {
58ab8e96 3910 tmp = tcg_temp_new_i32();
08307563 3911 gen_aa32_ld16u(tmp, addr, IS_USER(s));
84496233 3912 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 3913 tmp2 = tcg_temp_new_i32();
08307563 3914 gen_aa32_ld16u(tmp2, addr, IS_USER(s));
84496233 3915 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3916 tcg_gen_shli_i32(tmp2, tmp2, 16);
3917 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3918 tcg_temp_free_i32(tmp2);
84496233
JR
3919 neon_store_reg(rd, pass, tmp);
3920 } else {
3921 tmp = neon_load_reg(rd, pass);
7d1b0095 3922 tmp2 = tcg_temp_new_i32();
84496233 3923 tcg_gen_shri_i32(tmp2, tmp, 16);
08307563 3924 gen_aa32_st16(tmp, addr, IS_USER(s));
58ab8e96 3925 tcg_temp_free_i32(tmp);
84496233 3926 tcg_gen_addi_i32(addr, addr, stride);
08307563 3927 gen_aa32_st16(tmp2, addr, IS_USER(s));
58ab8e96 3928 tcg_temp_free_i32(tmp2);
1b2b1e54 3929 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3930 }
84496233
JR
3931 } else /* size == 0 */ {
3932 if (load) {
39d5492a 3933 TCGV_UNUSED_I32(tmp2);
84496233 3934 for (n = 0; n < 4; n++) {
58ab8e96 3935 tmp = tcg_temp_new_i32();
08307563 3936 gen_aa32_ld8u(tmp, addr, IS_USER(s));
84496233
JR
3937 tcg_gen_addi_i32(addr, addr, stride);
3938 if (n == 0) {
3939 tmp2 = tmp;
3940 } else {
41ba8341
PB
3941 tcg_gen_shli_i32(tmp, tmp, n * 8);
3942 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3943 tcg_temp_free_i32(tmp);
84496233 3944 }
9ee6e8bb 3945 }
84496233
JR
3946 neon_store_reg(rd, pass, tmp2);
3947 } else {
3948 tmp2 = neon_load_reg(rd, pass);
3949 for (n = 0; n < 4; n++) {
7d1b0095 3950 tmp = tcg_temp_new_i32();
84496233
JR
3951 if (n == 0) {
3952 tcg_gen_mov_i32(tmp, tmp2);
3953 } else {
3954 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3955 }
08307563 3956 gen_aa32_st8(tmp, addr, IS_USER(s));
58ab8e96 3957 tcg_temp_free_i32(tmp);
84496233
JR
3958 tcg_gen_addi_i32(addr, addr, stride);
3959 }
7d1b0095 3960 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3961 }
3962 }
3963 }
3964 }
84496233 3965 rd += spacing;
9ee6e8bb 3966 }
e318a60b 3967 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3968 stride = nregs * 8;
3969 } else {
3970 size = (insn >> 10) & 3;
3971 if (size == 3) {
3972 /* Load single element to all lanes. */
8e18cde3
PM
3973 int a = (insn >> 4) & 1;
3974 if (!load) {
9ee6e8bb 3975 return 1;
8e18cde3 3976 }
9ee6e8bb
PB
3977 size = (insn >> 6) & 3;
3978 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3979
3980 if (size == 3) {
3981 if (nregs != 4 || a == 0) {
9ee6e8bb 3982 return 1;
99c475ab 3983 }
8e18cde3
PM
3984 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3985 size = 2;
3986 }
3987 if (nregs == 1 && a == 1 && size == 0) {
3988 return 1;
3989 }
3990 if (nregs == 3 && a == 1) {
3991 return 1;
3992 }
e318a60b 3993 addr = tcg_temp_new_i32();
8e18cde3
PM
3994 load_reg_var(s, addr, rn);
3995 if (nregs == 1) {
3996 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3997 tmp = gen_load_and_replicate(s, addr, size);
3998 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3999 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4000 if (insn & (1 << 5)) {
4001 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
4002 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4003 }
4004 tcg_temp_free_i32(tmp);
4005 } else {
4006 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4007 stride = (insn & (1 << 5)) ? 2 : 1;
4008 for (reg = 0; reg < nregs; reg++) {
4009 tmp = gen_load_and_replicate(s, addr, size);
4010 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4011 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4012 tcg_temp_free_i32(tmp);
4013 tcg_gen_addi_i32(addr, addr, 1 << size);
4014 rd += stride;
4015 }
9ee6e8bb 4016 }
e318a60b 4017 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4018 stride = (1 << size) * nregs;
4019 } else {
4020 /* Single element. */
93262b16 4021 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4022 pass = (insn >> 7) & 1;
4023 switch (size) {
4024 case 0:
4025 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4026 stride = 1;
4027 break;
4028 case 1:
4029 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4030 stride = (insn & (1 << 5)) ? 2 : 1;
4031 break;
4032 case 2:
4033 shift = 0;
9ee6e8bb
PB
4034 stride = (insn & (1 << 6)) ? 2 : 1;
4035 break;
4036 default:
4037 abort();
4038 }
4039 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4040 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4041 switch (nregs) {
4042 case 1:
4043 if (((idx & (1 << size)) != 0) ||
4044 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4045 return 1;
4046 }
4047 break;
4048 case 3:
4049 if ((idx & 1) != 0) {
4050 return 1;
4051 }
4052 /* fall through */
4053 case 2:
4054 if (size == 2 && (idx & 2) != 0) {
4055 return 1;
4056 }
4057 break;
4058 case 4:
4059 if ((size == 2) && ((idx & 3) == 3)) {
4060 return 1;
4061 }
4062 break;
4063 default:
4064 abort();
4065 }
4066 if ((rd + stride * (nregs - 1)) > 31) {
4067 /* Attempts to write off the end of the register file
4068 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4069 * the neon_load_reg() would write off the end of the array.
4070 */
4071 return 1;
4072 }
e318a60b 4073 addr = tcg_temp_new_i32();
dcc65026 4074 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4075 for (reg = 0; reg < nregs; reg++) {
4076 if (load) {
58ab8e96 4077 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
4078 switch (size) {
4079 case 0:
08307563 4080 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4081 break;
4082 case 1:
08307563 4083 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4084 break;
4085 case 2:
08307563 4086 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 4087 break;
a50f5b91
PB
4088 default: /* Avoid compiler warnings. */
4089 abort();
9ee6e8bb
PB
4090 }
4091 if (size != 2) {
8f8e3aa4 4092 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
4093 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4094 shift, size ? 16 : 8);
7d1b0095 4095 tcg_temp_free_i32(tmp2);
9ee6e8bb 4096 }
8f8e3aa4 4097 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4098 } else { /* Store */
8f8e3aa4
PB
4099 tmp = neon_load_reg(rd, pass);
4100 if (shift)
4101 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4102 switch (size) {
4103 case 0:
08307563 4104 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4105 break;
4106 case 1:
08307563 4107 gen_aa32_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4108 break;
4109 case 2:
08307563 4110 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4111 break;
99c475ab 4112 }
58ab8e96 4113 tcg_temp_free_i32(tmp);
99c475ab 4114 }
9ee6e8bb 4115 rd += stride;
1b2b1e54 4116 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4117 }
e318a60b 4118 tcg_temp_free_i32(addr);
9ee6e8bb 4119 stride = nregs * (1 << size);
99c475ab 4120 }
9ee6e8bb
PB
4121 }
4122 if (rm != 15) {
39d5492a 4123 TCGv_i32 base;
b26eefb6
PB
4124
4125 base = load_reg(s, rn);
9ee6e8bb 4126 if (rm == 13) {
b26eefb6 4127 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4128 } else {
39d5492a 4129 TCGv_i32 index;
b26eefb6
PB
4130 index = load_reg(s, rm);
4131 tcg_gen_add_i32(base, base, index);
7d1b0095 4132 tcg_temp_free_i32(index);
9ee6e8bb 4133 }
b26eefb6 4134 store_reg(s, rn, base);
9ee6e8bb
PB
4135 }
4136 return 0;
4137}
3b46e624 4138
8f8e3aa4 4139/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4140static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4141{
4142 tcg_gen_and_i32(t, t, c);
f669df27 4143 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4144 tcg_gen_or_i32(dest, t, f);
4145}
4146
39d5492a 4147static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4148{
4149 switch (size) {
4150 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4151 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4152 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4153 default: abort();
4154 }
4155}
4156
39d5492a 4157static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4158{
4159 switch (size) {
02da0b2d
PM
4160 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4161 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4162 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4163 default: abort();
4164 }
4165}
4166
39d5492a 4167static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4168{
4169 switch (size) {
02da0b2d
PM
4170 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4171 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4172 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4173 default: abort();
4174 }
4175}
4176
39d5492a 4177static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4178{
4179 switch (size) {
02da0b2d
PM
4180 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4181 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4182 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4183 default: abort();
4184 }
4185}
4186
39d5492a 4187static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4188 int q, int u)
4189{
4190 if (q) {
4191 if (u) {
4192 switch (size) {
4193 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4194 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4195 default: abort();
4196 }
4197 } else {
4198 switch (size) {
4199 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4200 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4201 default: abort();
4202 }
4203 }
4204 } else {
4205 if (u) {
4206 switch (size) {
b408a9b0
CL
4207 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4208 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4209 default: abort();
4210 }
4211 } else {
4212 switch (size) {
4213 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4214 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4215 default: abort();
4216 }
4217 }
4218 }
4219}
4220
39d5492a 4221static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4222{
4223 if (u) {
4224 switch (size) {
4225 case 0: gen_helper_neon_widen_u8(dest, src); break;
4226 case 1: gen_helper_neon_widen_u16(dest, src); break;
4227 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4228 default: abort();
4229 }
4230 } else {
4231 switch (size) {
4232 case 0: gen_helper_neon_widen_s8(dest, src); break;
4233 case 1: gen_helper_neon_widen_s16(dest, src); break;
4234 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4235 default: abort();
4236 }
4237 }
7d1b0095 4238 tcg_temp_free_i32(src);
ad69471c
PB
4239}
4240
4241static inline void gen_neon_addl(int size)
4242{
4243 switch (size) {
4244 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4245 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4246 case 2: tcg_gen_add_i64(CPU_V001); break;
4247 default: abort();
4248 }
4249}
4250
4251static inline void gen_neon_subl(int size)
4252{
4253 switch (size) {
4254 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4255 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4256 case 2: tcg_gen_sub_i64(CPU_V001); break;
4257 default: abort();
4258 }
4259}
4260
a7812ae4 4261static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4262{
4263 switch (size) {
4264 case 0: gen_helper_neon_negl_u16(var, var); break;
4265 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4266 case 2:
4267 tcg_gen_neg_i64(var, var);
4268 break;
ad69471c
PB
4269 default: abort();
4270 }
4271}
4272
a7812ae4 4273static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4274{
4275 switch (size) {
02da0b2d
PM
4276 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4277 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4278 default: abort();
4279 }
4280}
4281
39d5492a
PM
4282static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4283 int size, int u)
ad69471c 4284{
a7812ae4 4285 TCGv_i64 tmp;
ad69471c
PB
4286
4287 switch ((size << 1) | u) {
4288 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4289 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4290 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4291 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4292 case 4:
4293 tmp = gen_muls_i64_i32(a, b);
4294 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4295 tcg_temp_free_i64(tmp);
ad69471c
PB
4296 break;
4297 case 5:
4298 tmp = gen_mulu_i64_i32(a, b);
4299 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4300 tcg_temp_free_i64(tmp);
ad69471c
PB
4301 break;
4302 default: abort();
4303 }
c6067f04
CL
4304
4305 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4306 Don't forget to clean them now. */
4307 if (size < 2) {
7d1b0095
PM
4308 tcg_temp_free_i32(a);
4309 tcg_temp_free_i32(b);
c6067f04 4310 }
ad69471c
PB
4311}
4312
39d5492a
PM
4313static void gen_neon_narrow_op(int op, int u, int size,
4314 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4315{
4316 if (op) {
4317 if (u) {
4318 gen_neon_unarrow_sats(size, dest, src);
4319 } else {
4320 gen_neon_narrow(size, dest, src);
4321 }
4322 } else {
4323 if (u) {
4324 gen_neon_narrow_satu(size, dest, src);
4325 } else {
4326 gen_neon_narrow_sats(size, dest, src);
4327 }
4328 }
4329}
4330
62698be3
PM
4331/* Symbolic constants for op fields for Neon 3-register same-length.
4332 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4333 * table A7-9.
4334 */
4335#define NEON_3R_VHADD 0
4336#define NEON_3R_VQADD 1
4337#define NEON_3R_VRHADD 2
4338#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4339#define NEON_3R_VHSUB 4
4340#define NEON_3R_VQSUB 5
4341#define NEON_3R_VCGT 6
4342#define NEON_3R_VCGE 7
4343#define NEON_3R_VSHL 8
4344#define NEON_3R_VQSHL 9
4345#define NEON_3R_VRSHL 10
4346#define NEON_3R_VQRSHL 11
4347#define NEON_3R_VMAX 12
4348#define NEON_3R_VMIN 13
4349#define NEON_3R_VABD 14
4350#define NEON_3R_VABA 15
4351#define NEON_3R_VADD_VSUB 16
4352#define NEON_3R_VTST_VCEQ 17
4353#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4354#define NEON_3R_VMUL 19
4355#define NEON_3R_VPMAX 20
4356#define NEON_3R_VPMIN 21
4357#define NEON_3R_VQDMULH_VQRDMULH 22
4358#define NEON_3R_VPADD 23
da97f52c 4359#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4360#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4361#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4362#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4363#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4364#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4365#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4366
4367static const uint8_t neon_3r_sizes[] = {
4368 [NEON_3R_VHADD] = 0x7,
4369 [NEON_3R_VQADD] = 0xf,
4370 [NEON_3R_VRHADD] = 0x7,
4371 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4372 [NEON_3R_VHSUB] = 0x7,
4373 [NEON_3R_VQSUB] = 0xf,
4374 [NEON_3R_VCGT] = 0x7,
4375 [NEON_3R_VCGE] = 0x7,
4376 [NEON_3R_VSHL] = 0xf,
4377 [NEON_3R_VQSHL] = 0xf,
4378 [NEON_3R_VRSHL] = 0xf,
4379 [NEON_3R_VQRSHL] = 0xf,
4380 [NEON_3R_VMAX] = 0x7,
4381 [NEON_3R_VMIN] = 0x7,
4382 [NEON_3R_VABD] = 0x7,
4383 [NEON_3R_VABA] = 0x7,
4384 [NEON_3R_VADD_VSUB] = 0xf,
4385 [NEON_3R_VTST_VCEQ] = 0x7,
4386 [NEON_3R_VML] = 0x7,
4387 [NEON_3R_VMUL] = 0x7,
4388 [NEON_3R_VPMAX] = 0x7,
4389 [NEON_3R_VPMIN] = 0x7,
4390 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4391 [NEON_3R_VPADD] = 0x7,
da97f52c 4392 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4393 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4394 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4395 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4396 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4397 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4398 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4399};
4400
600b828c
PM
4401/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4402 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4403 * table A7-13.
4404 */
4405#define NEON_2RM_VREV64 0
4406#define NEON_2RM_VREV32 1
4407#define NEON_2RM_VREV16 2
4408#define NEON_2RM_VPADDL 4
4409#define NEON_2RM_VPADDL_U 5
4410#define NEON_2RM_VCLS 8
4411#define NEON_2RM_VCLZ 9
4412#define NEON_2RM_VCNT 10
4413#define NEON_2RM_VMVN 11
4414#define NEON_2RM_VPADAL 12
4415#define NEON_2RM_VPADAL_U 13
4416#define NEON_2RM_VQABS 14
4417#define NEON_2RM_VQNEG 15
4418#define NEON_2RM_VCGT0 16
4419#define NEON_2RM_VCGE0 17
4420#define NEON_2RM_VCEQ0 18
4421#define NEON_2RM_VCLE0 19
4422#define NEON_2RM_VCLT0 20
4423#define NEON_2RM_VABS 22
4424#define NEON_2RM_VNEG 23
4425#define NEON_2RM_VCGT0_F 24
4426#define NEON_2RM_VCGE0_F 25
4427#define NEON_2RM_VCEQ0_F 26
4428#define NEON_2RM_VCLE0_F 27
4429#define NEON_2RM_VCLT0_F 28
4430#define NEON_2RM_VABS_F 30
4431#define NEON_2RM_VNEG_F 31
4432#define NEON_2RM_VSWP 32
4433#define NEON_2RM_VTRN 33
4434#define NEON_2RM_VUZP 34
4435#define NEON_2RM_VZIP 35
4436#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4437#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4438#define NEON_2RM_VSHLL 38
4439#define NEON_2RM_VCVT_F16_F32 44
4440#define NEON_2RM_VCVT_F32_F16 46
4441#define NEON_2RM_VRECPE 56
4442#define NEON_2RM_VRSQRTE 57
4443#define NEON_2RM_VRECPE_F 58
4444#define NEON_2RM_VRSQRTE_F 59
4445#define NEON_2RM_VCVT_FS 60
4446#define NEON_2RM_VCVT_FU 61
4447#define NEON_2RM_VCVT_SF 62
4448#define NEON_2RM_VCVT_UF 63
4449
4450static int neon_2rm_is_float_op(int op)
4451{
4452 /* Return true if this neon 2reg-misc op is float-to-float */
4453 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4454 op >= NEON_2RM_VRECPE_F);
4455}
4456
4457/* Each entry in this array has bit n set if the insn allows
4458 * size value n (otherwise it will UNDEF). Since unallocated
4459 * op values will have no bits set they always UNDEF.
4460 */
4461static const uint8_t neon_2rm_sizes[] = {
4462 [NEON_2RM_VREV64] = 0x7,
4463 [NEON_2RM_VREV32] = 0x3,
4464 [NEON_2RM_VREV16] = 0x1,
4465 [NEON_2RM_VPADDL] = 0x7,
4466 [NEON_2RM_VPADDL_U] = 0x7,
4467 [NEON_2RM_VCLS] = 0x7,
4468 [NEON_2RM_VCLZ] = 0x7,
4469 [NEON_2RM_VCNT] = 0x1,
4470 [NEON_2RM_VMVN] = 0x1,
4471 [NEON_2RM_VPADAL] = 0x7,
4472 [NEON_2RM_VPADAL_U] = 0x7,
4473 [NEON_2RM_VQABS] = 0x7,
4474 [NEON_2RM_VQNEG] = 0x7,
4475 [NEON_2RM_VCGT0] = 0x7,
4476 [NEON_2RM_VCGE0] = 0x7,
4477 [NEON_2RM_VCEQ0] = 0x7,
4478 [NEON_2RM_VCLE0] = 0x7,
4479 [NEON_2RM_VCLT0] = 0x7,
4480 [NEON_2RM_VABS] = 0x7,
4481 [NEON_2RM_VNEG] = 0x7,
4482 [NEON_2RM_VCGT0_F] = 0x4,
4483 [NEON_2RM_VCGE0_F] = 0x4,
4484 [NEON_2RM_VCEQ0_F] = 0x4,
4485 [NEON_2RM_VCLE0_F] = 0x4,
4486 [NEON_2RM_VCLT0_F] = 0x4,
4487 [NEON_2RM_VABS_F] = 0x4,
4488 [NEON_2RM_VNEG_F] = 0x4,
4489 [NEON_2RM_VSWP] = 0x1,
4490 [NEON_2RM_VTRN] = 0x7,
4491 [NEON_2RM_VUZP] = 0x7,
4492 [NEON_2RM_VZIP] = 0x7,
4493 [NEON_2RM_VMOVN] = 0x7,
4494 [NEON_2RM_VQMOVN] = 0x7,
4495 [NEON_2RM_VSHLL] = 0x7,
4496 [NEON_2RM_VCVT_F16_F32] = 0x2,
4497 [NEON_2RM_VCVT_F32_F16] = 0x2,
4498 [NEON_2RM_VRECPE] = 0x4,
4499 [NEON_2RM_VRSQRTE] = 0x4,
4500 [NEON_2RM_VRECPE_F] = 0x4,
4501 [NEON_2RM_VRSQRTE_F] = 0x4,
4502 [NEON_2RM_VCVT_FS] = 0x4,
4503 [NEON_2RM_VCVT_FU] = 0x4,
4504 [NEON_2RM_VCVT_SF] = 0x4,
4505 [NEON_2RM_VCVT_UF] = 0x4,
4506};
4507
9ee6e8bb
PB
4508/* Translate a NEON data processing instruction. Return nonzero if the
4509 instruction is invalid.
ad69471c
PB
4510 We process data in a mixture of 32-bit and 64-bit chunks.
4511 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4512
0ecb72a5 4513static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4514{
4515 int op;
4516 int q;
4517 int rd, rn, rm;
4518 int size;
4519 int shift;
4520 int pass;
4521 int count;
4522 int pairwise;
4523 int u;
ca9a32e4 4524 uint32_t imm, mask;
39d5492a 4525 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4526 TCGv_i64 tmp64;
9ee6e8bb 4527
5df8bac1 4528 if (!s->vfp_enabled)
9ee6e8bb
PB
4529 return 1;
4530 q = (insn & (1 << 6)) != 0;
4531 u = (insn >> 24) & 1;
4532 VFP_DREG_D(rd, insn);
4533 VFP_DREG_N(rn, insn);
4534 VFP_DREG_M(rm, insn);
4535 size = (insn >> 20) & 3;
4536 if ((insn & (1 << 23)) == 0) {
4537 /* Three register same length. */
4538 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4539 /* Catch invalid op and bad size combinations: UNDEF */
4540 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4541 return 1;
4542 }
25f84f79
PM
4543 /* All insns of this form UNDEF for either this condition or the
4544 * superset of cases "Q==1"; we catch the latter later.
4545 */
4546 if (q && ((rd | rn | rm) & 1)) {
4547 return 1;
4548 }
62698be3
PM
4549 if (size == 3 && op != NEON_3R_LOGIC) {
4550 /* 64-bit element instructions. */
9ee6e8bb 4551 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4552 neon_load_reg64(cpu_V0, rn + pass);
4553 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4554 switch (op) {
62698be3 4555 case NEON_3R_VQADD:
9ee6e8bb 4556 if (u) {
02da0b2d
PM
4557 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4558 cpu_V0, cpu_V1);
2c0262af 4559 } else {
02da0b2d
PM
4560 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4561 cpu_V0, cpu_V1);
2c0262af 4562 }
9ee6e8bb 4563 break;
62698be3 4564 case NEON_3R_VQSUB:
9ee6e8bb 4565 if (u) {
02da0b2d
PM
4566 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4567 cpu_V0, cpu_V1);
ad69471c 4568 } else {
02da0b2d
PM
4569 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4570 cpu_V0, cpu_V1);
ad69471c
PB
4571 }
4572 break;
62698be3 4573 case NEON_3R_VSHL:
ad69471c
PB
4574 if (u) {
4575 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4576 } else {
4577 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4578 }
4579 break;
62698be3 4580 case NEON_3R_VQSHL:
ad69471c 4581 if (u) {
02da0b2d
PM
4582 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4583 cpu_V1, cpu_V0);
ad69471c 4584 } else {
02da0b2d
PM
4585 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4586 cpu_V1, cpu_V0);
ad69471c
PB
4587 }
4588 break;
62698be3 4589 case NEON_3R_VRSHL:
ad69471c
PB
4590 if (u) {
4591 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4592 } else {
ad69471c
PB
4593 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4594 }
4595 break;
62698be3 4596 case NEON_3R_VQRSHL:
ad69471c 4597 if (u) {
02da0b2d
PM
4598 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4599 cpu_V1, cpu_V0);
ad69471c 4600 } else {
02da0b2d
PM
4601 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4602 cpu_V1, cpu_V0);
1e8d4eec 4603 }
9ee6e8bb 4604 break;
62698be3 4605 case NEON_3R_VADD_VSUB:
9ee6e8bb 4606 if (u) {
ad69471c 4607 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4608 } else {
ad69471c 4609 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4610 }
4611 break;
4612 default:
4613 abort();
2c0262af 4614 }
ad69471c 4615 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4616 }
9ee6e8bb 4617 return 0;
2c0262af 4618 }
25f84f79 4619 pairwise = 0;
9ee6e8bb 4620 switch (op) {
62698be3
PM
4621 case NEON_3R_VSHL:
4622 case NEON_3R_VQSHL:
4623 case NEON_3R_VRSHL:
4624 case NEON_3R_VQRSHL:
9ee6e8bb 4625 {
ad69471c
PB
4626 int rtmp;
4627 /* Shift instruction operands are reversed. */
4628 rtmp = rn;
9ee6e8bb 4629 rn = rm;
ad69471c 4630 rm = rtmp;
9ee6e8bb 4631 }
2c0262af 4632 break;
25f84f79
PM
4633 case NEON_3R_VPADD:
4634 if (u) {
4635 return 1;
4636 }
4637 /* Fall through */
62698be3
PM
4638 case NEON_3R_VPMAX:
4639 case NEON_3R_VPMIN:
9ee6e8bb 4640 pairwise = 1;
2c0262af 4641 break;
25f84f79
PM
4642 case NEON_3R_FLOAT_ARITH:
4643 pairwise = (u && size < 2); /* if VPADD (float) */
4644 break;
4645 case NEON_3R_FLOAT_MINMAX:
4646 pairwise = u; /* if VPMIN/VPMAX (float) */
4647 break;
4648 case NEON_3R_FLOAT_CMP:
4649 if (!u && size) {
4650 /* no encoding for U=0 C=1x */
4651 return 1;
4652 }
4653 break;
4654 case NEON_3R_FLOAT_ACMP:
4655 if (!u) {
4656 return 1;
4657 }
4658 break;
4659 case NEON_3R_VRECPS_VRSQRTS:
4660 if (u) {
4661 return 1;
4662 }
2c0262af 4663 break;
25f84f79
PM
4664 case NEON_3R_VMUL:
4665 if (u && (size != 0)) {
4666 /* UNDEF on invalid size for polynomial subcase */
4667 return 1;
4668 }
2c0262af 4669 break;
da97f52c
PM
4670 case NEON_3R_VFM:
4671 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4672 return 1;
4673 }
4674 break;
9ee6e8bb 4675 default:
2c0262af 4676 break;
9ee6e8bb 4677 }
dd8fbd78 4678
25f84f79
PM
4679 if (pairwise && q) {
4680 /* All the pairwise insns UNDEF if Q is set */
4681 return 1;
4682 }
4683
9ee6e8bb
PB
4684 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4685
4686 if (pairwise) {
4687 /* Pairwise. */
a5a14945
JR
4688 if (pass < 1) {
4689 tmp = neon_load_reg(rn, 0);
4690 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4691 } else {
a5a14945
JR
4692 tmp = neon_load_reg(rm, 0);
4693 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4694 }
4695 } else {
4696 /* Elementwise. */
dd8fbd78
FN
4697 tmp = neon_load_reg(rn, pass);
4698 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4699 }
4700 switch (op) {
62698be3 4701 case NEON_3R_VHADD:
9ee6e8bb
PB
4702 GEN_NEON_INTEGER_OP(hadd);
4703 break;
62698be3 4704 case NEON_3R_VQADD:
02da0b2d 4705 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4706 break;
62698be3 4707 case NEON_3R_VRHADD:
9ee6e8bb 4708 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4709 break;
62698be3 4710 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4711 switch ((u << 2) | size) {
4712 case 0: /* VAND */
dd8fbd78 4713 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4714 break;
4715 case 1: /* BIC */
f669df27 4716 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4717 break;
4718 case 2: /* VORR */
dd8fbd78 4719 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4720 break;
4721 case 3: /* VORN */
f669df27 4722 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4723 break;
4724 case 4: /* VEOR */
dd8fbd78 4725 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4726 break;
4727 case 5: /* VBSL */
dd8fbd78
FN
4728 tmp3 = neon_load_reg(rd, pass);
4729 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4730 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4731 break;
4732 case 6: /* VBIT */
dd8fbd78
FN
4733 tmp3 = neon_load_reg(rd, pass);
4734 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4735 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4736 break;
4737 case 7: /* VBIF */
dd8fbd78
FN
4738 tmp3 = neon_load_reg(rd, pass);
4739 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4740 tcg_temp_free_i32(tmp3);
9ee6e8bb 4741 break;
2c0262af
FB
4742 }
4743 break;
62698be3 4744 case NEON_3R_VHSUB:
9ee6e8bb
PB
4745 GEN_NEON_INTEGER_OP(hsub);
4746 break;
62698be3 4747 case NEON_3R_VQSUB:
02da0b2d 4748 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4749 break;
62698be3 4750 case NEON_3R_VCGT:
9ee6e8bb
PB
4751 GEN_NEON_INTEGER_OP(cgt);
4752 break;
62698be3 4753 case NEON_3R_VCGE:
9ee6e8bb
PB
4754 GEN_NEON_INTEGER_OP(cge);
4755 break;
62698be3 4756 case NEON_3R_VSHL:
ad69471c 4757 GEN_NEON_INTEGER_OP(shl);
2c0262af 4758 break;
62698be3 4759 case NEON_3R_VQSHL:
02da0b2d 4760 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4761 break;
62698be3 4762 case NEON_3R_VRSHL:
ad69471c 4763 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4764 break;
62698be3 4765 case NEON_3R_VQRSHL:
02da0b2d 4766 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4767 break;
62698be3 4768 case NEON_3R_VMAX:
9ee6e8bb
PB
4769 GEN_NEON_INTEGER_OP(max);
4770 break;
62698be3 4771 case NEON_3R_VMIN:
9ee6e8bb
PB
4772 GEN_NEON_INTEGER_OP(min);
4773 break;
62698be3 4774 case NEON_3R_VABD:
9ee6e8bb
PB
4775 GEN_NEON_INTEGER_OP(abd);
4776 break;
62698be3 4777 case NEON_3R_VABA:
9ee6e8bb 4778 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4779 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4780 tmp2 = neon_load_reg(rd, pass);
4781 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4782 break;
62698be3 4783 case NEON_3R_VADD_VSUB:
9ee6e8bb 4784 if (!u) { /* VADD */
62698be3 4785 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4786 } else { /* VSUB */
4787 switch (size) {
dd8fbd78
FN
4788 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4789 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4790 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4791 default: abort();
9ee6e8bb
PB
4792 }
4793 }
4794 break;
62698be3 4795 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4796 if (!u) { /* VTST */
4797 switch (size) {
dd8fbd78
FN
4798 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4799 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4800 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4801 default: abort();
9ee6e8bb
PB
4802 }
4803 } else { /* VCEQ */
4804 switch (size) {
dd8fbd78
FN
4805 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4806 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4807 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4808 default: abort();
9ee6e8bb
PB
4809 }
4810 }
4811 break;
62698be3 4812 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4813 switch (size) {
dd8fbd78
FN
4814 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4815 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4816 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4817 default: abort();
9ee6e8bb 4818 }
7d1b0095 4819 tcg_temp_free_i32(tmp2);
dd8fbd78 4820 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4821 if (u) { /* VMLS */
dd8fbd78 4822 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4823 } else { /* VMLA */
dd8fbd78 4824 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4825 }
4826 break;
62698be3 4827 case NEON_3R_VMUL:
9ee6e8bb 4828 if (u) { /* polynomial */
dd8fbd78 4829 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4830 } else { /* Integer */
4831 switch (size) {
dd8fbd78
FN
4832 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4833 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4834 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4835 default: abort();
9ee6e8bb
PB
4836 }
4837 }
4838 break;
62698be3 4839 case NEON_3R_VPMAX:
9ee6e8bb
PB
4840 GEN_NEON_INTEGER_OP(pmax);
4841 break;
62698be3 4842 case NEON_3R_VPMIN:
9ee6e8bb
PB
4843 GEN_NEON_INTEGER_OP(pmin);
4844 break;
62698be3 4845 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4846 if (!u) { /* VQDMULH */
4847 switch (size) {
02da0b2d
PM
4848 case 1:
4849 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4850 break;
4851 case 2:
4852 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4853 break;
62698be3 4854 default: abort();
9ee6e8bb 4855 }
62698be3 4856 } else { /* VQRDMULH */
9ee6e8bb 4857 switch (size) {
02da0b2d
PM
4858 case 1:
4859 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4860 break;
4861 case 2:
4862 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4863 break;
62698be3 4864 default: abort();
9ee6e8bb
PB
4865 }
4866 }
4867 break;
62698be3 4868 case NEON_3R_VPADD:
9ee6e8bb 4869 switch (size) {
dd8fbd78
FN
4870 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4871 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4872 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4873 default: abort();
9ee6e8bb
PB
4874 }
4875 break;
62698be3 4876 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4877 {
4878 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4879 switch ((u << 2) | size) {
4880 case 0: /* VADD */
aa47cfdd
PM
4881 case 4: /* VPADD */
4882 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4883 break;
4884 case 2: /* VSUB */
aa47cfdd 4885 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4886 break;
4887 case 6: /* VABD */
aa47cfdd 4888 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4889 break;
4890 default:
62698be3 4891 abort();
9ee6e8bb 4892 }
aa47cfdd 4893 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4894 break;
aa47cfdd 4895 }
62698be3 4896 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4897 {
4898 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4899 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4900 if (!u) {
7d1b0095 4901 tcg_temp_free_i32(tmp2);
dd8fbd78 4902 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4903 if (size == 0) {
aa47cfdd 4904 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4905 } else {
aa47cfdd 4906 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4907 }
4908 }
aa47cfdd 4909 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4910 break;
aa47cfdd 4911 }
62698be3 4912 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4913 {
4914 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4915 if (!u) {
aa47cfdd 4916 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4917 } else {
aa47cfdd
PM
4918 if (size == 0) {
4919 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4920 } else {
4921 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4922 }
b5ff1b31 4923 }
aa47cfdd 4924 tcg_temp_free_ptr(fpstatus);
2c0262af 4925 break;
aa47cfdd 4926 }
62698be3 4927 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4928 {
4929 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4930 if (size == 0) {
4931 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4932 } else {
4933 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4934 }
4935 tcg_temp_free_ptr(fpstatus);
2c0262af 4936 break;
aa47cfdd 4937 }
62698be3 4938 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4939 {
4940 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4941 if (size == 0) {
4942 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4943 } else {
4944 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4945 }
4946 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4947 break;
aa47cfdd 4948 }
62698be3 4949 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4950 if (size == 0)
dd8fbd78 4951 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4952 else
dd8fbd78 4953 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4954 break;
da97f52c
PM
4955 case NEON_3R_VFM:
4956 {
4957 /* VFMA, VFMS: fused multiply-add */
4958 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4959 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4960 if (size) {
4961 /* VFMS */
4962 gen_helper_vfp_negs(tmp, tmp);
4963 }
4964 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4965 tcg_temp_free_i32(tmp3);
4966 tcg_temp_free_ptr(fpstatus);
4967 break;
4968 }
9ee6e8bb
PB
4969 default:
4970 abort();
2c0262af 4971 }
7d1b0095 4972 tcg_temp_free_i32(tmp2);
dd8fbd78 4973
9ee6e8bb
PB
4974 /* Save the result. For elementwise operations we can put it
4975 straight into the destination register. For pairwise operations
4976 we have to be careful to avoid clobbering the source operands. */
4977 if (pairwise && rd == rm) {
dd8fbd78 4978 neon_store_scratch(pass, tmp);
9ee6e8bb 4979 } else {
dd8fbd78 4980 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4981 }
4982
4983 } /* for pass */
4984 if (pairwise && rd == rm) {
4985 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4986 tmp = neon_load_scratch(pass);
4987 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4988 }
4989 }
ad69471c 4990 /* End of 3 register same size operations. */
9ee6e8bb
PB
4991 } else if (insn & (1 << 4)) {
4992 if ((insn & 0x00380080) != 0) {
4993 /* Two registers and shift. */
4994 op = (insn >> 8) & 0xf;
4995 if (insn & (1 << 7)) {
cc13115b
PM
4996 /* 64-bit shift. */
4997 if (op > 7) {
4998 return 1;
4999 }
9ee6e8bb
PB
5000 size = 3;
5001 } else {
5002 size = 2;
5003 while ((insn & (1 << (size + 19))) == 0)
5004 size--;
5005 }
5006 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5007 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5008 by immediate using the variable shift operations. */
5009 if (op < 8) {
5010 /* Shift by immediate:
5011 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5012 if (q && ((rd | rm) & 1)) {
5013 return 1;
5014 }
5015 if (!u && (op == 4 || op == 6)) {
5016 return 1;
5017 }
9ee6e8bb
PB
5018 /* Right shifts are encoded as N - shift, where N is the
5019 element size in bits. */
5020 if (op <= 4)
5021 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5022 if (size == 3) {
5023 count = q + 1;
5024 } else {
5025 count = q ? 4: 2;
5026 }
5027 switch (size) {
5028 case 0:
5029 imm = (uint8_t) shift;
5030 imm |= imm << 8;
5031 imm |= imm << 16;
5032 break;
5033 case 1:
5034 imm = (uint16_t) shift;
5035 imm |= imm << 16;
5036 break;
5037 case 2:
5038 case 3:
5039 imm = shift;
5040 break;
5041 default:
5042 abort();
5043 }
5044
5045 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5046 if (size == 3) {
5047 neon_load_reg64(cpu_V0, rm + pass);
5048 tcg_gen_movi_i64(cpu_V1, imm);
5049 switch (op) {
5050 case 0: /* VSHR */
5051 case 1: /* VSRA */
5052 if (u)
5053 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5054 else
ad69471c 5055 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5056 break;
ad69471c
PB
5057 case 2: /* VRSHR */
5058 case 3: /* VRSRA */
5059 if (u)
5060 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5061 else
ad69471c 5062 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5063 break;
ad69471c 5064 case 4: /* VSRI */
ad69471c
PB
5065 case 5: /* VSHL, VSLI */
5066 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5067 break;
0322b26e 5068 case 6: /* VQSHLU */
02da0b2d
PM
5069 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5070 cpu_V0, cpu_V1);
ad69471c 5071 break;
0322b26e
PM
5072 case 7: /* VQSHL */
5073 if (u) {
02da0b2d 5074 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5075 cpu_V0, cpu_V1);
5076 } else {
02da0b2d 5077 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5078 cpu_V0, cpu_V1);
5079 }
9ee6e8bb 5080 break;
9ee6e8bb 5081 }
ad69471c
PB
5082 if (op == 1 || op == 3) {
5083 /* Accumulate. */
5371cb81 5084 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5085 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5086 } else if (op == 4 || (op == 5 && u)) {
5087 /* Insert */
923e6509
CL
5088 neon_load_reg64(cpu_V1, rd + pass);
5089 uint64_t mask;
5090 if (shift < -63 || shift > 63) {
5091 mask = 0;
5092 } else {
5093 if (op == 4) {
5094 mask = 0xffffffffffffffffull >> -shift;
5095 } else {
5096 mask = 0xffffffffffffffffull << shift;
5097 }
5098 }
5099 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5100 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5101 }
5102 neon_store_reg64(cpu_V0, rd + pass);
5103 } else { /* size < 3 */
5104 /* Operands in T0 and T1. */
dd8fbd78 5105 tmp = neon_load_reg(rm, pass);
7d1b0095 5106 tmp2 = tcg_temp_new_i32();
dd8fbd78 5107 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5108 switch (op) {
5109 case 0: /* VSHR */
5110 case 1: /* VSRA */
5111 GEN_NEON_INTEGER_OP(shl);
5112 break;
5113 case 2: /* VRSHR */
5114 case 3: /* VRSRA */
5115 GEN_NEON_INTEGER_OP(rshl);
5116 break;
5117 case 4: /* VSRI */
ad69471c
PB
5118 case 5: /* VSHL, VSLI */
5119 switch (size) {
dd8fbd78
FN
5120 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5121 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5122 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5123 default: abort();
ad69471c
PB
5124 }
5125 break;
0322b26e 5126 case 6: /* VQSHLU */
ad69471c 5127 switch (size) {
0322b26e 5128 case 0:
02da0b2d
PM
5129 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5130 tmp, tmp2);
0322b26e
PM
5131 break;
5132 case 1:
02da0b2d
PM
5133 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5134 tmp, tmp2);
0322b26e
PM
5135 break;
5136 case 2:
02da0b2d
PM
5137 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5138 tmp, tmp2);
0322b26e
PM
5139 break;
5140 default:
cc13115b 5141 abort();
ad69471c
PB
5142 }
5143 break;
0322b26e 5144 case 7: /* VQSHL */
02da0b2d 5145 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5146 break;
ad69471c 5147 }
7d1b0095 5148 tcg_temp_free_i32(tmp2);
ad69471c
PB
5149
5150 if (op == 1 || op == 3) {
5151 /* Accumulate. */
dd8fbd78 5152 tmp2 = neon_load_reg(rd, pass);
5371cb81 5153 gen_neon_add(size, tmp, tmp2);
7d1b0095 5154 tcg_temp_free_i32(tmp2);
ad69471c
PB
5155 } else if (op == 4 || (op == 5 && u)) {
5156 /* Insert */
5157 switch (size) {
5158 case 0:
5159 if (op == 4)
ca9a32e4 5160 mask = 0xff >> -shift;
ad69471c 5161 else
ca9a32e4
JR
5162 mask = (uint8_t)(0xff << shift);
5163 mask |= mask << 8;
5164 mask |= mask << 16;
ad69471c
PB
5165 break;
5166 case 1:
5167 if (op == 4)
ca9a32e4 5168 mask = 0xffff >> -shift;
ad69471c 5169 else
ca9a32e4
JR
5170 mask = (uint16_t)(0xffff << shift);
5171 mask |= mask << 16;
ad69471c
PB
5172 break;
5173 case 2:
ca9a32e4
JR
5174 if (shift < -31 || shift > 31) {
5175 mask = 0;
5176 } else {
5177 if (op == 4)
5178 mask = 0xffffffffu >> -shift;
5179 else
5180 mask = 0xffffffffu << shift;
5181 }
ad69471c
PB
5182 break;
5183 default:
5184 abort();
5185 }
dd8fbd78 5186 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5187 tcg_gen_andi_i32(tmp, tmp, mask);
5188 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5189 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5190 tcg_temp_free_i32(tmp2);
ad69471c 5191 }
dd8fbd78 5192 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5193 }
5194 } /* for pass */
5195 } else if (op < 10) {
ad69471c 5196 /* Shift by immediate and narrow:
9ee6e8bb 5197 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5198 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5199 if (rm & 1) {
5200 return 1;
5201 }
9ee6e8bb
PB
5202 shift = shift - (1 << (size + 3));
5203 size++;
92cdfaeb 5204 if (size == 3) {
a7812ae4 5205 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5206 neon_load_reg64(cpu_V0, rm);
5207 neon_load_reg64(cpu_V1, rm + 1);
5208 for (pass = 0; pass < 2; pass++) {
5209 TCGv_i64 in;
5210 if (pass == 0) {
5211 in = cpu_V0;
5212 } else {
5213 in = cpu_V1;
5214 }
ad69471c 5215 if (q) {
0b36f4cd 5216 if (input_unsigned) {
92cdfaeb 5217 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5218 } else {
92cdfaeb 5219 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5220 }
ad69471c 5221 } else {
0b36f4cd 5222 if (input_unsigned) {
92cdfaeb 5223 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5224 } else {
92cdfaeb 5225 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5226 }
ad69471c 5227 }
7d1b0095 5228 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5229 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5230 neon_store_reg(rd, pass, tmp);
5231 } /* for pass */
5232 tcg_temp_free_i64(tmp64);
5233 } else {
5234 if (size == 1) {
5235 imm = (uint16_t)shift;
5236 imm |= imm << 16;
2c0262af 5237 } else {
92cdfaeb
PM
5238 /* size == 2 */
5239 imm = (uint32_t)shift;
5240 }
5241 tmp2 = tcg_const_i32(imm);
5242 tmp4 = neon_load_reg(rm + 1, 0);
5243 tmp5 = neon_load_reg(rm + 1, 1);
5244 for (pass = 0; pass < 2; pass++) {
5245 if (pass == 0) {
5246 tmp = neon_load_reg(rm, 0);
5247 } else {
5248 tmp = tmp4;
5249 }
0b36f4cd
CL
5250 gen_neon_shift_narrow(size, tmp, tmp2, q,
5251 input_unsigned);
92cdfaeb
PM
5252 if (pass == 0) {
5253 tmp3 = neon_load_reg(rm, 1);
5254 } else {
5255 tmp3 = tmp5;
5256 }
0b36f4cd
CL
5257 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5258 input_unsigned);
36aa55dc 5259 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5260 tcg_temp_free_i32(tmp);
5261 tcg_temp_free_i32(tmp3);
5262 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5263 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5264 neon_store_reg(rd, pass, tmp);
5265 } /* for pass */
c6067f04 5266 tcg_temp_free_i32(tmp2);
b75263d6 5267 }
9ee6e8bb 5268 } else if (op == 10) {
cc13115b
PM
5269 /* VSHLL, VMOVL */
5270 if (q || (rd & 1)) {
9ee6e8bb 5271 return 1;
cc13115b 5272 }
ad69471c
PB
5273 tmp = neon_load_reg(rm, 0);
5274 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5275 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5276 if (pass == 1)
5277 tmp = tmp2;
5278
5279 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5280
9ee6e8bb
PB
5281 if (shift != 0) {
5282 /* The shift is less than the width of the source
ad69471c
PB
5283 type, so we can just shift the whole register. */
5284 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5285 /* Widen the result of shift: we need to clear
5286 * the potential overflow bits resulting from
5287 * left bits of the narrow input appearing as
5288 * right bits of left the neighbour narrow
5289 * input. */
ad69471c
PB
5290 if (size < 2 || !u) {
5291 uint64_t imm64;
5292 if (size == 0) {
5293 imm = (0xffu >> (8 - shift));
5294 imm |= imm << 16;
acdf01ef 5295 } else if (size == 1) {
ad69471c 5296 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5297 } else {
5298 /* size == 2 */
5299 imm = 0xffffffff >> (32 - shift);
5300 }
5301 if (size < 2) {
5302 imm64 = imm | (((uint64_t)imm) << 32);
5303 } else {
5304 imm64 = imm;
9ee6e8bb 5305 }
acdf01ef 5306 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5307 }
5308 }
ad69471c 5309 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5310 }
f73534a5 5311 } else if (op >= 14) {
9ee6e8bb 5312 /* VCVT fixed-point. */
cc13115b
PM
5313 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5314 return 1;
5315 }
f73534a5
PM
5316 /* We have already masked out the must-be-1 top bit of imm6,
5317 * hence this 32-shift where the ARM ARM has 64-imm6.
5318 */
5319 shift = 32 - shift;
9ee6e8bb 5320 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5321 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5322 if (!(op & 1)) {
9ee6e8bb 5323 if (u)
5500b06c 5324 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5325 else
5500b06c 5326 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5327 } else {
5328 if (u)
5500b06c 5329 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5330 else
5500b06c 5331 gen_vfp_tosl(0, shift, 1);
2c0262af 5332 }
4373f3ce 5333 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5334 }
5335 } else {
9ee6e8bb
PB
5336 return 1;
5337 }
5338 } else { /* (insn & 0x00380080) == 0 */
5339 int invert;
7d80fee5
PM
5340 if (q && (rd & 1)) {
5341 return 1;
5342 }
9ee6e8bb
PB
5343
5344 op = (insn >> 8) & 0xf;
5345 /* One register and immediate. */
5346 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5347 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5348 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5349 * We choose to not special-case this and will behave as if a
5350 * valid constant encoding of 0 had been given.
5351 */
9ee6e8bb
PB
5352 switch (op) {
5353 case 0: case 1:
5354 /* no-op */
5355 break;
5356 case 2: case 3:
5357 imm <<= 8;
5358 break;
5359 case 4: case 5:
5360 imm <<= 16;
5361 break;
5362 case 6: case 7:
5363 imm <<= 24;
5364 break;
5365 case 8: case 9:
5366 imm |= imm << 16;
5367 break;
5368 case 10: case 11:
5369 imm = (imm << 8) | (imm << 24);
5370 break;
5371 case 12:
8e31209e 5372 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5373 break;
5374 case 13:
5375 imm = (imm << 16) | 0xffff;
5376 break;
5377 case 14:
5378 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5379 if (invert)
5380 imm = ~imm;
5381 break;
5382 case 15:
7d80fee5
PM
5383 if (invert) {
5384 return 1;
5385 }
9ee6e8bb
PB
5386 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5387 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5388 break;
5389 }
5390 if (invert)
5391 imm = ~imm;
5392
9ee6e8bb
PB
5393 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5394 if (op & 1 && op < 12) {
ad69471c 5395 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5396 if (invert) {
5397 /* The immediate value has already been inverted, so
5398 BIC becomes AND. */
ad69471c 5399 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5400 } else {
ad69471c 5401 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5402 }
9ee6e8bb 5403 } else {
ad69471c 5404 /* VMOV, VMVN. */
7d1b0095 5405 tmp = tcg_temp_new_i32();
9ee6e8bb 5406 if (op == 14 && invert) {
a5a14945 5407 int n;
ad69471c
PB
5408 uint32_t val;
5409 val = 0;
9ee6e8bb
PB
5410 for (n = 0; n < 4; n++) {
5411 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5412 val |= 0xff << (n * 8);
9ee6e8bb 5413 }
ad69471c
PB
5414 tcg_gen_movi_i32(tmp, val);
5415 } else {
5416 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5417 }
9ee6e8bb 5418 }
ad69471c 5419 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5420 }
5421 }
e4b3861d 5422 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5423 if (size != 3) {
5424 op = (insn >> 8) & 0xf;
5425 if ((insn & (1 << 6)) == 0) {
5426 /* Three registers of different lengths. */
5427 int src1_wide;
5428 int src2_wide;
5429 int prewiden;
695272dc
PM
5430 /* undefreq: bit 0 : UNDEF if size != 0
5431 * bit 1 : UNDEF if size == 0
5432 * bit 2 : UNDEF if U == 1
5433 * Note that [1:0] set implies 'always UNDEF'
5434 */
5435 int undefreq;
5436 /* prewiden, src1_wide, src2_wide, undefreq */
5437 static const int neon_3reg_wide[16][4] = {
5438 {1, 0, 0, 0}, /* VADDL */
5439 {1, 1, 0, 0}, /* VADDW */
5440 {1, 0, 0, 0}, /* VSUBL */
5441 {1, 1, 0, 0}, /* VSUBW */
5442 {0, 1, 1, 0}, /* VADDHN */
5443 {0, 0, 0, 0}, /* VABAL */
5444 {0, 1, 1, 0}, /* VSUBHN */
5445 {0, 0, 0, 0}, /* VABDL */
5446 {0, 0, 0, 0}, /* VMLAL */
5447 {0, 0, 0, 6}, /* VQDMLAL */
5448 {0, 0, 0, 0}, /* VMLSL */
5449 {0, 0, 0, 6}, /* VQDMLSL */
5450 {0, 0, 0, 0}, /* Integer VMULL */
5451 {0, 0, 0, 2}, /* VQDMULL */
5452 {0, 0, 0, 5}, /* Polynomial VMULL */
5453 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5454 };
5455
5456 prewiden = neon_3reg_wide[op][0];
5457 src1_wide = neon_3reg_wide[op][1];
5458 src2_wide = neon_3reg_wide[op][2];
695272dc 5459 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5460
695272dc
PM
5461 if (((undefreq & 1) && (size != 0)) ||
5462 ((undefreq & 2) && (size == 0)) ||
5463 ((undefreq & 4) && u)) {
5464 return 1;
5465 }
5466 if ((src1_wide && (rn & 1)) ||
5467 (src2_wide && (rm & 1)) ||
5468 (!src2_wide && (rd & 1))) {
ad69471c 5469 return 1;
695272dc 5470 }
ad69471c 5471
9ee6e8bb
PB
5472 /* Avoid overlapping operands. Wide source operands are
5473 always aligned so will never overlap with wide
5474 destinations in problematic ways. */
8f8e3aa4 5475 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5476 tmp = neon_load_reg(rm, 1);
5477 neon_store_scratch(2, tmp);
8f8e3aa4 5478 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5479 tmp = neon_load_reg(rn, 1);
5480 neon_store_scratch(2, tmp);
9ee6e8bb 5481 }
39d5492a 5482 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5483 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5484 if (src1_wide) {
5485 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5486 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5487 } else {
ad69471c 5488 if (pass == 1 && rd == rn) {
dd8fbd78 5489 tmp = neon_load_scratch(2);
9ee6e8bb 5490 } else {
ad69471c
PB
5491 tmp = neon_load_reg(rn, pass);
5492 }
5493 if (prewiden) {
5494 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5495 }
5496 }
ad69471c
PB
5497 if (src2_wide) {
5498 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5499 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5500 } else {
ad69471c 5501 if (pass == 1 && rd == rm) {
dd8fbd78 5502 tmp2 = neon_load_scratch(2);
9ee6e8bb 5503 } else {
ad69471c
PB
5504 tmp2 = neon_load_reg(rm, pass);
5505 }
5506 if (prewiden) {
5507 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5508 }
9ee6e8bb
PB
5509 }
5510 switch (op) {
5511 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5512 gen_neon_addl(size);
9ee6e8bb 5513 break;
79b0e534 5514 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5515 gen_neon_subl(size);
9ee6e8bb
PB
5516 break;
5517 case 5: case 7: /* VABAL, VABDL */
5518 switch ((size << 1) | u) {
ad69471c
PB
5519 case 0:
5520 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5521 break;
5522 case 1:
5523 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5524 break;
5525 case 2:
5526 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5527 break;
5528 case 3:
5529 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5530 break;
5531 case 4:
5532 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5533 break;
5534 case 5:
5535 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5536 break;
9ee6e8bb
PB
5537 default: abort();
5538 }
7d1b0095
PM
5539 tcg_temp_free_i32(tmp2);
5540 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5541 break;
5542 case 8: case 9: case 10: case 11: case 12: case 13:
5543 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5544 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5545 break;
5546 case 14: /* Polynomial VMULL */
e5ca24cb 5547 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5548 tcg_temp_free_i32(tmp2);
5549 tcg_temp_free_i32(tmp);
e5ca24cb 5550 break;
695272dc
PM
5551 default: /* 15 is RESERVED: caught earlier */
5552 abort();
9ee6e8bb 5553 }
ebcd88ce
PM
5554 if (op == 13) {
5555 /* VQDMULL */
5556 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5557 neon_store_reg64(cpu_V0, rd + pass);
5558 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5559 /* Accumulate. */
ebcd88ce 5560 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5561 switch (op) {
4dc064e6
PM
5562 case 10: /* VMLSL */
5563 gen_neon_negl(cpu_V0, size);
5564 /* Fall through */
5565 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5566 gen_neon_addl(size);
9ee6e8bb
PB
5567 break;
5568 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5569 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5570 if (op == 11) {
5571 gen_neon_negl(cpu_V0, size);
5572 }
ad69471c
PB
5573 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5574 break;
9ee6e8bb
PB
5575 default:
5576 abort();
5577 }
ad69471c 5578 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5579 } else if (op == 4 || op == 6) {
5580 /* Narrowing operation. */
7d1b0095 5581 tmp = tcg_temp_new_i32();
79b0e534 5582 if (!u) {
9ee6e8bb 5583 switch (size) {
ad69471c
PB
5584 case 0:
5585 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5586 break;
5587 case 1:
5588 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5589 break;
5590 case 2:
5591 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5592 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5593 break;
9ee6e8bb
PB
5594 default: abort();
5595 }
5596 } else {
5597 switch (size) {
ad69471c
PB
5598 case 0:
5599 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5600 break;
5601 case 1:
5602 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5603 break;
5604 case 2:
5605 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5606 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5607 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5608 break;
9ee6e8bb
PB
5609 default: abort();
5610 }
5611 }
ad69471c
PB
5612 if (pass == 0) {
5613 tmp3 = tmp;
5614 } else {
5615 neon_store_reg(rd, 0, tmp3);
5616 neon_store_reg(rd, 1, tmp);
5617 }
9ee6e8bb
PB
5618 } else {
5619 /* Write back the result. */
ad69471c 5620 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5621 }
5622 }
5623 } else {
3e3326df
PM
5624 /* Two registers and a scalar. NB that for ops of this form
5625 * the ARM ARM labels bit 24 as Q, but it is in our variable
5626 * 'u', not 'q'.
5627 */
5628 if (size == 0) {
5629 return 1;
5630 }
9ee6e8bb 5631 switch (op) {
9ee6e8bb 5632 case 1: /* Float VMLA scalar */
9ee6e8bb 5633 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5634 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5635 if (size == 1) {
5636 return 1;
5637 }
5638 /* fall through */
5639 case 0: /* Integer VMLA scalar */
5640 case 4: /* Integer VMLS scalar */
5641 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5642 case 12: /* VQDMULH scalar */
5643 case 13: /* VQRDMULH scalar */
3e3326df
PM
5644 if (u && ((rd | rn) & 1)) {
5645 return 1;
5646 }
dd8fbd78
FN
5647 tmp = neon_get_scalar(size, rm);
5648 neon_store_scratch(0, tmp);
9ee6e8bb 5649 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5650 tmp = neon_load_scratch(0);
5651 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5652 if (op == 12) {
5653 if (size == 1) {
02da0b2d 5654 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5655 } else {
02da0b2d 5656 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5657 }
5658 } else if (op == 13) {
5659 if (size == 1) {
02da0b2d 5660 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5661 } else {
02da0b2d 5662 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5663 }
5664 } else if (op & 1) {
aa47cfdd
PM
5665 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5666 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5667 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5668 } else {
5669 switch (size) {
dd8fbd78
FN
5670 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5671 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5672 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5673 default: abort();
9ee6e8bb
PB
5674 }
5675 }
7d1b0095 5676 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5677 if (op < 8) {
5678 /* Accumulate. */
dd8fbd78 5679 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5680 switch (op) {
5681 case 0:
dd8fbd78 5682 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5683 break;
5684 case 1:
aa47cfdd
PM
5685 {
5686 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5687 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5688 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5689 break;
aa47cfdd 5690 }
9ee6e8bb 5691 case 4:
dd8fbd78 5692 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5693 break;
5694 case 5:
aa47cfdd
PM
5695 {
5696 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5697 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5698 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5699 break;
aa47cfdd 5700 }
9ee6e8bb
PB
5701 default:
5702 abort();
5703 }
7d1b0095 5704 tcg_temp_free_i32(tmp2);
9ee6e8bb 5705 }
dd8fbd78 5706 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5707 }
5708 break;
9ee6e8bb 5709 case 3: /* VQDMLAL scalar */
9ee6e8bb 5710 case 7: /* VQDMLSL scalar */
9ee6e8bb 5711 case 11: /* VQDMULL scalar */
3e3326df 5712 if (u == 1) {
ad69471c 5713 return 1;
3e3326df
PM
5714 }
5715 /* fall through */
5716 case 2: /* VMLAL sclar */
5717 case 6: /* VMLSL scalar */
5718 case 10: /* VMULL scalar */
5719 if (rd & 1) {
5720 return 1;
5721 }
dd8fbd78 5722 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5723 /* We need a copy of tmp2 because gen_neon_mull
5724 * deletes it during pass 0. */
7d1b0095 5725 tmp4 = tcg_temp_new_i32();
c6067f04 5726 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5727 tmp3 = neon_load_reg(rn, 1);
ad69471c 5728
9ee6e8bb 5729 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5730 if (pass == 0) {
5731 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5732 } else {
dd8fbd78 5733 tmp = tmp3;
c6067f04 5734 tmp2 = tmp4;
9ee6e8bb 5735 }
ad69471c 5736 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5737 if (op != 11) {
5738 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5739 }
9ee6e8bb 5740 switch (op) {
4dc064e6
PM
5741 case 6:
5742 gen_neon_negl(cpu_V0, size);
5743 /* Fall through */
5744 case 2:
ad69471c 5745 gen_neon_addl(size);
9ee6e8bb
PB
5746 break;
5747 case 3: case 7:
ad69471c 5748 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5749 if (op == 7) {
5750 gen_neon_negl(cpu_V0, size);
5751 }
ad69471c 5752 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5753 break;
5754 case 10:
5755 /* no-op */
5756 break;
5757 case 11:
ad69471c 5758 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5759 break;
5760 default:
5761 abort();
5762 }
ad69471c 5763 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5764 }
dd8fbd78 5765
dd8fbd78 5766
9ee6e8bb
PB
5767 break;
5768 default: /* 14 and 15 are RESERVED */
5769 return 1;
5770 }
5771 }
5772 } else { /* size == 3 */
5773 if (!u) {
5774 /* Extract. */
9ee6e8bb 5775 imm = (insn >> 8) & 0xf;
ad69471c
PB
5776
5777 if (imm > 7 && !q)
5778 return 1;
5779
52579ea1
PM
5780 if (q && ((rd | rn | rm) & 1)) {
5781 return 1;
5782 }
5783
ad69471c
PB
5784 if (imm == 0) {
5785 neon_load_reg64(cpu_V0, rn);
5786 if (q) {
5787 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5788 }
ad69471c
PB
5789 } else if (imm == 8) {
5790 neon_load_reg64(cpu_V0, rn + 1);
5791 if (q) {
5792 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5793 }
ad69471c 5794 } else if (q) {
a7812ae4 5795 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5796 if (imm < 8) {
5797 neon_load_reg64(cpu_V0, rn);
a7812ae4 5798 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5799 } else {
5800 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5801 neon_load_reg64(tmp64, rm);
ad69471c
PB
5802 }
5803 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5804 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5805 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5806 if (imm < 8) {
5807 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5808 } else {
ad69471c
PB
5809 neon_load_reg64(cpu_V1, rm + 1);
5810 imm -= 8;
9ee6e8bb 5811 }
ad69471c 5812 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5813 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5814 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5815 tcg_temp_free_i64(tmp64);
ad69471c 5816 } else {
a7812ae4 5817 /* BUGFIX */
ad69471c 5818 neon_load_reg64(cpu_V0, rn);
a7812ae4 5819 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5820 neon_load_reg64(cpu_V1, rm);
a7812ae4 5821 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5822 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5823 }
5824 neon_store_reg64(cpu_V0, rd);
5825 if (q) {
5826 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5827 }
5828 } else if ((insn & (1 << 11)) == 0) {
5829 /* Two register misc. */
5830 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5831 size = (insn >> 18) & 3;
600b828c
PM
5832 /* UNDEF for unknown op values and bad op-size combinations */
5833 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5834 return 1;
5835 }
fc2a9b37
PM
5836 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5837 q && ((rm | rd) & 1)) {
5838 return 1;
5839 }
9ee6e8bb 5840 switch (op) {
600b828c 5841 case NEON_2RM_VREV64:
9ee6e8bb 5842 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5843 tmp = neon_load_reg(rm, pass * 2);
5844 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5845 switch (size) {
dd8fbd78
FN
5846 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5847 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5848 case 2: /* no-op */ break;
5849 default: abort();
5850 }
dd8fbd78 5851 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5852 if (size == 2) {
dd8fbd78 5853 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5854 } else {
9ee6e8bb 5855 switch (size) {
dd8fbd78
FN
5856 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5857 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5858 default: abort();
5859 }
dd8fbd78 5860 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5861 }
5862 }
5863 break;
600b828c
PM
5864 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5865 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5866 for (pass = 0; pass < q + 1; pass++) {
5867 tmp = neon_load_reg(rm, pass * 2);
5868 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5869 tmp = neon_load_reg(rm, pass * 2 + 1);
5870 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5871 switch (size) {
5872 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5873 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5874 case 2: tcg_gen_add_i64(CPU_V001); break;
5875 default: abort();
5876 }
600b828c 5877 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5878 /* Accumulate. */
ad69471c
PB
5879 neon_load_reg64(cpu_V1, rd + pass);
5880 gen_neon_addl(size);
9ee6e8bb 5881 }
ad69471c 5882 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5883 }
5884 break;
600b828c 5885 case NEON_2RM_VTRN:
9ee6e8bb 5886 if (size == 2) {
a5a14945 5887 int n;
9ee6e8bb 5888 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5889 tmp = neon_load_reg(rm, n);
5890 tmp2 = neon_load_reg(rd, n + 1);
5891 neon_store_reg(rm, n, tmp2);
5892 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5893 }
5894 } else {
5895 goto elementwise;
5896 }
5897 break;
600b828c 5898 case NEON_2RM_VUZP:
02acedf9 5899 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5900 return 1;
9ee6e8bb
PB
5901 }
5902 break;
600b828c 5903 case NEON_2RM_VZIP:
d68a6f3a 5904 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5905 return 1;
9ee6e8bb
PB
5906 }
5907 break;
600b828c
PM
5908 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5909 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5910 if (rm & 1) {
5911 return 1;
5912 }
39d5492a 5913 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5914 for (pass = 0; pass < 2; pass++) {
ad69471c 5915 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5916 tmp = tcg_temp_new_i32();
600b828c
PM
5917 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5918 tmp, cpu_V0);
ad69471c
PB
5919 if (pass == 0) {
5920 tmp2 = tmp;
5921 } else {
5922 neon_store_reg(rd, 0, tmp2);
5923 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5924 }
9ee6e8bb
PB
5925 }
5926 break;
600b828c 5927 case NEON_2RM_VSHLL:
fc2a9b37 5928 if (q || (rd & 1)) {
9ee6e8bb 5929 return 1;
600b828c 5930 }
ad69471c
PB
5931 tmp = neon_load_reg(rm, 0);
5932 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5933 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5934 if (pass == 1)
5935 tmp = tmp2;
5936 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5937 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5938 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5939 }
5940 break;
600b828c 5941 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5942 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5943 q || (rm & 1)) {
5944 return 1;
5945 }
7d1b0095
PM
5946 tmp = tcg_temp_new_i32();
5947 tmp2 = tcg_temp_new_i32();
60011498 5948 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5949 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5950 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5951 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5952 tcg_gen_shli_i32(tmp2, tmp2, 16);
5953 tcg_gen_or_i32(tmp2, tmp2, tmp);
5954 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5955 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5956 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5957 neon_store_reg(rd, 0, tmp2);
7d1b0095 5958 tmp2 = tcg_temp_new_i32();
2d981da7 5959 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5960 tcg_gen_shli_i32(tmp2, tmp2, 16);
5961 tcg_gen_or_i32(tmp2, tmp2, tmp);
5962 neon_store_reg(rd, 1, tmp2);
7d1b0095 5963 tcg_temp_free_i32(tmp);
60011498 5964 break;
600b828c 5965 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5966 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5967 q || (rd & 1)) {
5968 return 1;
5969 }
7d1b0095 5970 tmp3 = tcg_temp_new_i32();
60011498
PB
5971 tmp = neon_load_reg(rm, 0);
5972 tmp2 = neon_load_reg(rm, 1);
5973 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5974 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5975 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5976 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5977 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5978 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5979 tcg_temp_free_i32(tmp);
60011498 5980 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5981 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5982 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5983 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5984 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5985 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5986 tcg_temp_free_i32(tmp2);
5987 tcg_temp_free_i32(tmp3);
60011498 5988 break;
9ee6e8bb
PB
5989 default:
5990 elementwise:
5991 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5992 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5993 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5994 neon_reg_offset(rm, pass));
39d5492a 5995 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5996 } else {
dd8fbd78 5997 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5998 }
5999 switch (op) {
600b828c 6000 case NEON_2RM_VREV32:
9ee6e8bb 6001 switch (size) {
dd8fbd78
FN
6002 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6003 case 1: gen_swap_half(tmp); break;
600b828c 6004 default: abort();
9ee6e8bb
PB
6005 }
6006 break;
600b828c 6007 case NEON_2RM_VREV16:
dd8fbd78 6008 gen_rev16(tmp);
9ee6e8bb 6009 break;
600b828c 6010 case NEON_2RM_VCLS:
9ee6e8bb 6011 switch (size) {
dd8fbd78
FN
6012 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6013 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6014 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6015 default: abort();
9ee6e8bb
PB
6016 }
6017 break;
600b828c 6018 case NEON_2RM_VCLZ:
9ee6e8bb 6019 switch (size) {
dd8fbd78
FN
6020 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6021 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6022 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6023 default: abort();
9ee6e8bb
PB
6024 }
6025 break;
600b828c 6026 case NEON_2RM_VCNT:
dd8fbd78 6027 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6028 break;
600b828c 6029 case NEON_2RM_VMVN:
dd8fbd78 6030 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6031 break;
600b828c 6032 case NEON_2RM_VQABS:
9ee6e8bb 6033 switch (size) {
02da0b2d
PM
6034 case 0:
6035 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6036 break;
6037 case 1:
6038 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6039 break;
6040 case 2:
6041 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6042 break;
600b828c 6043 default: abort();
9ee6e8bb
PB
6044 }
6045 break;
600b828c 6046 case NEON_2RM_VQNEG:
9ee6e8bb 6047 switch (size) {
02da0b2d
PM
6048 case 0:
6049 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6050 break;
6051 case 1:
6052 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6053 break;
6054 case 2:
6055 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6056 break;
600b828c 6057 default: abort();
9ee6e8bb
PB
6058 }
6059 break;
600b828c 6060 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6061 tmp2 = tcg_const_i32(0);
9ee6e8bb 6062 switch(size) {
dd8fbd78
FN
6063 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6064 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6065 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6066 default: abort();
9ee6e8bb 6067 }
39d5492a 6068 tcg_temp_free_i32(tmp2);
600b828c 6069 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6070 tcg_gen_not_i32(tmp, tmp);
600b828c 6071 }
9ee6e8bb 6072 break;
600b828c 6073 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6074 tmp2 = tcg_const_i32(0);
9ee6e8bb 6075 switch(size) {
dd8fbd78
FN
6076 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6077 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6078 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6079 default: abort();
9ee6e8bb 6080 }
39d5492a 6081 tcg_temp_free_i32(tmp2);
600b828c 6082 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6083 tcg_gen_not_i32(tmp, tmp);
600b828c 6084 }
9ee6e8bb 6085 break;
600b828c 6086 case NEON_2RM_VCEQ0:
dd8fbd78 6087 tmp2 = tcg_const_i32(0);
9ee6e8bb 6088 switch(size) {
dd8fbd78
FN
6089 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6090 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6091 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6092 default: abort();
9ee6e8bb 6093 }
39d5492a 6094 tcg_temp_free_i32(tmp2);
9ee6e8bb 6095 break;
600b828c 6096 case NEON_2RM_VABS:
9ee6e8bb 6097 switch(size) {
dd8fbd78
FN
6098 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6099 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6100 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6101 default: abort();
9ee6e8bb
PB
6102 }
6103 break;
600b828c 6104 case NEON_2RM_VNEG:
dd8fbd78
FN
6105 tmp2 = tcg_const_i32(0);
6106 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6107 tcg_temp_free_i32(tmp2);
9ee6e8bb 6108 break;
600b828c 6109 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6110 {
6111 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6112 tmp2 = tcg_const_i32(0);
aa47cfdd 6113 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6114 tcg_temp_free_i32(tmp2);
aa47cfdd 6115 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6116 break;
aa47cfdd 6117 }
600b828c 6118 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6119 {
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6121 tmp2 = tcg_const_i32(0);
aa47cfdd 6122 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6123 tcg_temp_free_i32(tmp2);
aa47cfdd 6124 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6125 break;
aa47cfdd 6126 }
600b828c 6127 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6128 {
6129 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6130 tmp2 = tcg_const_i32(0);
aa47cfdd 6131 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6132 tcg_temp_free_i32(tmp2);
aa47cfdd 6133 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6134 break;
aa47cfdd 6135 }
600b828c 6136 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6137 {
6138 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6139 tmp2 = tcg_const_i32(0);
aa47cfdd 6140 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6141 tcg_temp_free_i32(tmp2);
aa47cfdd 6142 tcg_temp_free_ptr(fpstatus);
0e326109 6143 break;
aa47cfdd 6144 }
600b828c 6145 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6146 {
6147 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6148 tmp2 = tcg_const_i32(0);
aa47cfdd 6149 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6150 tcg_temp_free_i32(tmp2);
aa47cfdd 6151 tcg_temp_free_ptr(fpstatus);
0e326109 6152 break;
aa47cfdd 6153 }
600b828c 6154 case NEON_2RM_VABS_F:
4373f3ce 6155 gen_vfp_abs(0);
9ee6e8bb 6156 break;
600b828c 6157 case NEON_2RM_VNEG_F:
4373f3ce 6158 gen_vfp_neg(0);
9ee6e8bb 6159 break;
600b828c 6160 case NEON_2RM_VSWP:
dd8fbd78
FN
6161 tmp2 = neon_load_reg(rd, pass);
6162 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6163 break;
600b828c 6164 case NEON_2RM_VTRN:
dd8fbd78 6165 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6166 switch (size) {
dd8fbd78
FN
6167 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6168 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6169 default: abort();
9ee6e8bb 6170 }
dd8fbd78 6171 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6172 break;
600b828c 6173 case NEON_2RM_VRECPE:
dd8fbd78 6174 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6175 break;
600b828c 6176 case NEON_2RM_VRSQRTE:
dd8fbd78 6177 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6178 break;
600b828c 6179 case NEON_2RM_VRECPE_F:
4373f3ce 6180 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6181 break;
600b828c 6182 case NEON_2RM_VRSQRTE_F:
4373f3ce 6183 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6184 break;
600b828c 6185 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6186 gen_vfp_sito(0, 1);
9ee6e8bb 6187 break;
600b828c 6188 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6189 gen_vfp_uito(0, 1);
9ee6e8bb 6190 break;
600b828c 6191 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6192 gen_vfp_tosiz(0, 1);
9ee6e8bb 6193 break;
600b828c 6194 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6195 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6196 break;
6197 default:
600b828c
PM
6198 /* Reserved op values were caught by the
6199 * neon_2rm_sizes[] check earlier.
6200 */
6201 abort();
9ee6e8bb 6202 }
600b828c 6203 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6204 tcg_gen_st_f32(cpu_F0s, cpu_env,
6205 neon_reg_offset(rd, pass));
9ee6e8bb 6206 } else {
dd8fbd78 6207 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6208 }
6209 }
6210 break;
6211 }
6212 } else if ((insn & (1 << 10)) == 0) {
6213 /* VTBL, VTBX. */
56907d77
PM
6214 int n = ((insn >> 8) & 3) + 1;
6215 if ((rn + n) > 32) {
6216 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6217 * helper function running off the end of the register file.
6218 */
6219 return 1;
6220 }
6221 n <<= 3;
9ee6e8bb 6222 if (insn & (1 << 6)) {
8f8e3aa4 6223 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6224 } else {
7d1b0095 6225 tmp = tcg_temp_new_i32();
8f8e3aa4 6226 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6227 }
8f8e3aa4 6228 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6229 tmp4 = tcg_const_i32(rn);
6230 tmp5 = tcg_const_i32(n);
9ef39277 6231 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6232 tcg_temp_free_i32(tmp);
9ee6e8bb 6233 if (insn & (1 << 6)) {
8f8e3aa4 6234 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6235 } else {
7d1b0095 6236 tmp = tcg_temp_new_i32();
8f8e3aa4 6237 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6238 }
8f8e3aa4 6239 tmp3 = neon_load_reg(rm, 1);
9ef39277 6240 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6241 tcg_temp_free_i32(tmp5);
6242 tcg_temp_free_i32(tmp4);
8f8e3aa4 6243 neon_store_reg(rd, 0, tmp2);
3018f259 6244 neon_store_reg(rd, 1, tmp3);
7d1b0095 6245 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6246 } else if ((insn & 0x380) == 0) {
6247 /* VDUP */
133da6aa
JR
6248 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6249 return 1;
6250 }
9ee6e8bb 6251 if (insn & (1 << 19)) {
dd8fbd78 6252 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6253 } else {
dd8fbd78 6254 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6255 }
6256 if (insn & (1 << 16)) {
dd8fbd78 6257 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6258 } else if (insn & (1 << 17)) {
6259 if ((insn >> 18) & 1)
dd8fbd78 6260 gen_neon_dup_high16(tmp);
9ee6e8bb 6261 else
dd8fbd78 6262 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6263 }
6264 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6265 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6266 tcg_gen_mov_i32(tmp2, tmp);
6267 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6268 }
7d1b0095 6269 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6270 } else {
6271 return 1;
6272 }
6273 }
6274 }
6275 return 0;
6276}
6277
0ecb72a5 6278static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6279{
4b6a83fb
PM
6280 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6281 const ARMCPRegInfo *ri;
6282 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6283
6284 cpnum = (insn >> 8) & 0xf;
6285 if (arm_feature(env, ARM_FEATURE_XSCALE)
6286 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6287 return 1;
6288
4b6a83fb 6289 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6290 switch (cpnum) {
6291 case 0:
6292 case 1:
6293 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6294 return disas_iwmmxt_insn(env, s, insn);
6295 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6296 return disas_dsp_insn(env, s, insn);
6297 }
6298 return 1;
6299 case 10:
6300 case 11:
6301 return disas_vfp_insn (env, s, insn);
4b6a83fb
PM
6302 default:
6303 break;
6304 }
6305
6306 /* Otherwise treat as a generic register access */
6307 is64 = (insn & (1 << 25)) == 0;
6308 if (!is64 && ((insn & (1 << 4)) == 0)) {
6309 /* cdp */
6310 return 1;
6311 }
6312
6313 crm = insn & 0xf;
6314 if (is64) {
6315 crn = 0;
6316 opc1 = (insn >> 4) & 0xf;
6317 opc2 = 0;
6318 rt2 = (insn >> 16) & 0xf;
6319 } else {
6320 crn = (insn >> 16) & 0xf;
6321 opc1 = (insn >> 21) & 7;
6322 opc2 = (insn >> 5) & 7;
6323 rt2 = 0;
6324 }
6325 isread = (insn >> 20) & 1;
6326 rt = (insn >> 12) & 0xf;
6327
6328 ri = get_arm_cp_reginfo(cpu,
6329 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6330 if (ri) {
6331 /* Check access permissions */
6332 if (!cp_access_ok(env, ri, isread)) {
6333 return 1;
6334 }
6335
6336 /* Handle special cases first */
6337 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6338 case ARM_CP_NOP:
6339 return 0;
6340 case ARM_CP_WFI:
6341 if (isread) {
6342 return 1;
6343 }
eaed129d 6344 gen_set_pc_im(s, s->pc);
4b6a83fb 6345 s->is_jmp = DISAS_WFI;
2bee5105 6346 return 0;
4b6a83fb
PM
6347 default:
6348 break;
6349 }
6350
2452731c
PM
6351 if (use_icount && (ri->type & ARM_CP_IO)) {
6352 gen_io_start();
6353 }
6354
4b6a83fb
PM
6355 if (isread) {
6356 /* Read */
6357 if (is64) {
6358 TCGv_i64 tmp64;
6359 TCGv_i32 tmp;
6360 if (ri->type & ARM_CP_CONST) {
6361 tmp64 = tcg_const_i64(ri->resetvalue);
6362 } else if (ri->readfn) {
6363 TCGv_ptr tmpptr;
eaed129d 6364 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6365 tmp64 = tcg_temp_new_i64();
6366 tmpptr = tcg_const_ptr(ri);
6367 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6368 tcg_temp_free_ptr(tmpptr);
6369 } else {
6370 tmp64 = tcg_temp_new_i64();
6371 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6372 }
6373 tmp = tcg_temp_new_i32();
6374 tcg_gen_trunc_i64_i32(tmp, tmp64);
6375 store_reg(s, rt, tmp);
6376 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6377 tmp = tcg_temp_new_i32();
4b6a83fb 6378 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6379 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6380 store_reg(s, rt2, tmp);
6381 } else {
39d5492a 6382 TCGv_i32 tmp;
4b6a83fb
PM
6383 if (ri->type & ARM_CP_CONST) {
6384 tmp = tcg_const_i32(ri->resetvalue);
6385 } else if (ri->readfn) {
6386 TCGv_ptr tmpptr;
eaed129d 6387 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6388 tmp = tcg_temp_new_i32();
6389 tmpptr = tcg_const_ptr(ri);
6390 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6391 tcg_temp_free_ptr(tmpptr);
6392 } else {
6393 tmp = load_cpu_offset(ri->fieldoffset);
6394 }
6395 if (rt == 15) {
6396 /* Destination register of r15 for 32 bit loads sets
6397 * the condition codes from the high 4 bits of the value
6398 */
6399 gen_set_nzcv(tmp);
6400 tcg_temp_free_i32(tmp);
6401 } else {
6402 store_reg(s, rt, tmp);
6403 }
6404 }
6405 } else {
6406 /* Write */
6407 if (ri->type & ARM_CP_CONST) {
6408 /* If not forbidden by access permissions, treat as WI */
6409 return 0;
6410 }
6411
6412 if (is64) {
39d5492a 6413 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
6414 TCGv_i64 tmp64 = tcg_temp_new_i64();
6415 tmplo = load_reg(s, rt);
6416 tmphi = load_reg(s, rt2);
6417 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6418 tcg_temp_free_i32(tmplo);
6419 tcg_temp_free_i32(tmphi);
6420 if (ri->writefn) {
6421 TCGv_ptr tmpptr = tcg_const_ptr(ri);
eaed129d 6422 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6423 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6424 tcg_temp_free_ptr(tmpptr);
6425 } else {
6426 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6427 }
6428 tcg_temp_free_i64(tmp64);
6429 } else {
6430 if (ri->writefn) {
39d5492a 6431 TCGv_i32 tmp;
4b6a83fb 6432 TCGv_ptr tmpptr;
eaed129d 6433 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6434 tmp = load_reg(s, rt);
6435 tmpptr = tcg_const_ptr(ri);
6436 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6437 tcg_temp_free_ptr(tmpptr);
6438 tcg_temp_free_i32(tmp);
6439 } else {
39d5492a 6440 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
6441 store_cpu_offset(tmp, ri->fieldoffset);
6442 }
6443 }
2452731c
PM
6444 }
6445
6446 if (use_icount && (ri->type & ARM_CP_IO)) {
6447 /* I/O operations must end the TB here (whether read or write) */
6448 gen_io_end();
6449 gen_lookup_tb(s);
6450 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
6451 /* We default to ending the TB on a coprocessor register write,
6452 * but allow this to be suppressed by the register definition
6453 * (usually only necessary to work around guest bugs).
6454 */
2452731c 6455 gen_lookup_tb(s);
4b6a83fb 6456 }
2452731c 6457
4b6a83fb
PM
6458 return 0;
6459 }
6460
4a9a539f 6461 return 1;
9ee6e8bb
PB
6462}
6463
5e3f878a
PB
6464
6465/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6466static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6467{
39d5492a 6468 TCGv_i32 tmp;
7d1b0095 6469 tmp = tcg_temp_new_i32();
5e3f878a
PB
6470 tcg_gen_trunc_i64_i32(tmp, val);
6471 store_reg(s, rlow, tmp);
7d1b0095 6472 tmp = tcg_temp_new_i32();
5e3f878a
PB
6473 tcg_gen_shri_i64(val, val, 32);
6474 tcg_gen_trunc_i64_i32(tmp, val);
6475 store_reg(s, rhigh, tmp);
6476}
6477
6478/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6479static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6480{
a7812ae4 6481 TCGv_i64 tmp;
39d5492a 6482 TCGv_i32 tmp2;
5e3f878a 6483
36aa55dc 6484 /* Load value and extend to 64 bits. */
a7812ae4 6485 tmp = tcg_temp_new_i64();
5e3f878a
PB
6486 tmp2 = load_reg(s, rlow);
6487 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6488 tcg_temp_free_i32(tmp2);
5e3f878a 6489 tcg_gen_add_i64(val, val, tmp);
b75263d6 6490 tcg_temp_free_i64(tmp);
5e3f878a
PB
6491}
6492
6493/* load and add a 64-bit value from a register pair. */
a7812ae4 6494static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6495{
a7812ae4 6496 TCGv_i64 tmp;
39d5492a
PM
6497 TCGv_i32 tmpl;
6498 TCGv_i32 tmph;
5e3f878a
PB
6499
6500 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6501 tmpl = load_reg(s, rlow);
6502 tmph = load_reg(s, rhigh);
a7812ae4 6503 tmp = tcg_temp_new_i64();
36aa55dc 6504 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6505 tcg_temp_free_i32(tmpl);
6506 tcg_temp_free_i32(tmph);
5e3f878a 6507 tcg_gen_add_i64(val, val, tmp);
b75263d6 6508 tcg_temp_free_i64(tmp);
5e3f878a
PB
6509}
6510
c9f10124 6511/* Set N and Z flags from hi|lo. */
39d5492a 6512static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6513{
c9f10124
RH
6514 tcg_gen_mov_i32(cpu_NF, hi);
6515 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6516}
6517
426f5abc
PB
6518/* Load/Store exclusive instructions are implemented by remembering
6519 the value/address loaded, and seeing if these are the same
b90372ad 6520 when the store is performed. This should be sufficient to implement
426f5abc
PB
6521 the architecturally mandated semantics, and avoids having to monitor
6522 regular stores.
6523
6524 In system emulation mode only one CPU will be running at once, so
6525 this sequence is effectively atomic. In user emulation mode we
6526 throw an exception and handle the atomic operation elsewhere. */
6527static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 6528 TCGv_i32 addr, int size)
426f5abc 6529{
94ee24e7 6530 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc
PB
6531
6532 switch (size) {
6533 case 0:
08307563 6534 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6535 break;
6536 case 1:
08307563 6537 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6538 break;
6539 case 2:
6540 case 3:
08307563 6541 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6542 break;
6543 default:
6544 abort();
6545 }
6546 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6547 store_reg(s, rt, tmp);
6548 if (size == 3) {
39d5492a 6549 TCGv_i32 tmp2 = tcg_temp_new_i32();
2c9adbda 6550 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7 6551 tmp = tcg_temp_new_i32();
08307563 6552 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6553 tcg_temp_free_i32(tmp2);
426f5abc
PB
6554 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6555 store_reg(s, rt2, tmp);
6556 }
6557 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6558}
6559
6560static void gen_clrex(DisasContext *s)
6561{
6562 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6563}
6564
6565#ifdef CONFIG_USER_ONLY
6566static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6567 TCGv_i32 addr, int size)
426f5abc
PB
6568{
6569 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6570 tcg_gen_movi_i32(cpu_exclusive_info,
6571 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6572 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6573}
6574#else
6575static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6576 TCGv_i32 addr, int size)
426f5abc 6577{
39d5492a 6578 TCGv_i32 tmp;
426f5abc
PB
6579 int done_label;
6580 int fail_label;
6581
6582 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6583 [addr] = {Rt};
6584 {Rd} = 0;
6585 } else {
6586 {Rd} = 1;
6587 } */
6588 fail_label = gen_new_label();
6589 done_label = gen_new_label();
6590 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
94ee24e7 6591 tmp = tcg_temp_new_i32();
426f5abc
PB
6592 switch (size) {
6593 case 0:
08307563 6594 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6595 break;
6596 case 1:
08307563 6597 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6598 break;
6599 case 2:
6600 case 3:
08307563 6601 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6602 break;
6603 default:
6604 abort();
6605 }
6606 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6607 tcg_temp_free_i32(tmp);
426f5abc 6608 if (size == 3) {
39d5492a 6609 TCGv_i32 tmp2 = tcg_temp_new_i32();
426f5abc 6610 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7 6611 tmp = tcg_temp_new_i32();
08307563 6612 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6613 tcg_temp_free_i32(tmp2);
426f5abc 6614 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6615 tcg_temp_free_i32(tmp);
426f5abc
PB
6616 }
6617 tmp = load_reg(s, rt);
6618 switch (size) {
6619 case 0:
08307563 6620 gen_aa32_st8(tmp, addr, IS_USER(s));
426f5abc
PB
6621 break;
6622 case 1:
08307563 6623 gen_aa32_st16(tmp, addr, IS_USER(s));
426f5abc
PB
6624 break;
6625 case 2:
6626 case 3:
08307563 6627 gen_aa32_st32(tmp, addr, IS_USER(s));
426f5abc
PB
6628 break;
6629 default:
6630 abort();
6631 }
94ee24e7 6632 tcg_temp_free_i32(tmp);
426f5abc
PB
6633 if (size == 3) {
6634 tcg_gen_addi_i32(addr, addr, 4);
6635 tmp = load_reg(s, rt2);
08307563 6636 gen_aa32_st32(tmp, addr, IS_USER(s));
94ee24e7 6637 tcg_temp_free_i32(tmp);
426f5abc
PB
6638 }
6639 tcg_gen_movi_i32(cpu_R[rd], 0);
6640 tcg_gen_br(done_label);
6641 gen_set_label(fail_label);
6642 tcg_gen_movi_i32(cpu_R[rd], 1);
6643 gen_set_label(done_label);
6644 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6645}
6646#endif
6647
81465888
PM
6648/* gen_srs:
6649 * @env: CPUARMState
6650 * @s: DisasContext
6651 * @mode: mode field from insn (which stack to store to)
6652 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6653 * @writeback: true if writeback bit set
6654 *
6655 * Generate code for the SRS (Store Return State) insn.
6656 */
6657static void gen_srs(DisasContext *s,
6658 uint32_t mode, uint32_t amode, bool writeback)
6659{
6660 int32_t offset;
6661 TCGv_i32 addr = tcg_temp_new_i32();
6662 TCGv_i32 tmp = tcg_const_i32(mode);
6663 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6664 tcg_temp_free_i32(tmp);
6665 switch (amode) {
6666 case 0: /* DA */
6667 offset = -4;
6668 break;
6669 case 1: /* IA */
6670 offset = 0;
6671 break;
6672 case 2: /* DB */
6673 offset = -8;
6674 break;
6675 case 3: /* IB */
6676 offset = 4;
6677 break;
6678 default:
6679 abort();
6680 }
6681 tcg_gen_addi_i32(addr, addr, offset);
6682 tmp = load_reg(s, 14);
08307563 6683 gen_aa32_st32(tmp, addr, 0);
5a839c0d 6684 tcg_temp_free_i32(tmp);
81465888
PM
6685 tmp = load_cpu_field(spsr);
6686 tcg_gen_addi_i32(addr, addr, 4);
08307563 6687 gen_aa32_st32(tmp, addr, 0);
5a839c0d 6688 tcg_temp_free_i32(tmp);
81465888
PM
6689 if (writeback) {
6690 switch (amode) {
6691 case 0:
6692 offset = -8;
6693 break;
6694 case 1:
6695 offset = 4;
6696 break;
6697 case 2:
6698 offset = -4;
6699 break;
6700 case 3:
6701 offset = 0;
6702 break;
6703 default:
6704 abort();
6705 }
6706 tcg_gen_addi_i32(addr, addr, offset);
6707 tmp = tcg_const_i32(mode);
6708 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6709 tcg_temp_free_i32(tmp);
6710 }
6711 tcg_temp_free_i32(addr);
6712}
6713
0ecb72a5 6714static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6715{
6716 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
6717 TCGv_i32 tmp;
6718 TCGv_i32 tmp2;
6719 TCGv_i32 tmp3;
6720 TCGv_i32 addr;
a7812ae4 6721 TCGv_i64 tmp64;
9ee6e8bb 6722
d31dd73e 6723 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6724 s->pc += 4;
6725
6726 /* M variants do not implement ARM mode. */
6727 if (IS_M(env))
6728 goto illegal_op;
6729 cond = insn >> 28;
6730 if (cond == 0xf){
be5e7a76
DES
6731 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6732 * choose to UNDEF. In ARMv5 and above the space is used
6733 * for miscellaneous unconditional instructions.
6734 */
6735 ARCH(5);
6736
9ee6e8bb
PB
6737 /* Unconditional instructions. */
6738 if (((insn >> 25) & 7) == 1) {
6739 /* NEON Data processing. */
6740 if (!arm_feature(env, ARM_FEATURE_NEON))
6741 goto illegal_op;
6742
6743 if (disas_neon_data_insn(env, s, insn))
6744 goto illegal_op;
6745 return;
6746 }
6747 if ((insn & 0x0f100000) == 0x04000000) {
6748 /* NEON load/store. */
6749 if (!arm_feature(env, ARM_FEATURE_NEON))
6750 goto illegal_op;
6751
6752 if (disas_neon_ls_insn(env, s, insn))
6753 goto illegal_op;
6754 return;
6755 }
3d185e5d
PM
6756 if (((insn & 0x0f30f000) == 0x0510f000) ||
6757 ((insn & 0x0f30f010) == 0x0710f000)) {
6758 if ((insn & (1 << 22)) == 0) {
6759 /* PLDW; v7MP */
6760 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6761 goto illegal_op;
6762 }
6763 }
6764 /* Otherwise PLD; v5TE+ */
be5e7a76 6765 ARCH(5TE);
3d185e5d
PM
6766 return;
6767 }
6768 if (((insn & 0x0f70f000) == 0x0450f000) ||
6769 ((insn & 0x0f70f010) == 0x0650f000)) {
6770 ARCH(7);
6771 return; /* PLI; V7 */
6772 }
6773 if (((insn & 0x0f700000) == 0x04100000) ||
6774 ((insn & 0x0f700010) == 0x06100000)) {
6775 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6776 goto illegal_op;
6777 }
6778 return; /* v7MP: Unallocated memory hint: must NOP */
6779 }
6780
6781 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6782 ARCH(6);
6783 /* setend */
10962fd5
PM
6784 if (((insn >> 9) & 1) != s->bswap_code) {
6785 /* Dynamic endianness switching not implemented. */
e0c270d9 6786 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
6787 goto illegal_op;
6788 }
6789 return;
6790 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6791 switch ((insn >> 4) & 0xf) {
6792 case 1: /* clrex */
6793 ARCH(6K);
426f5abc 6794 gen_clrex(s);
9ee6e8bb
PB
6795 return;
6796 case 4: /* dsb */
6797 case 5: /* dmb */
6798 case 6: /* isb */
6799 ARCH(7);
6800 /* We don't emulate caches so these are a no-op. */
6801 return;
6802 default:
6803 goto illegal_op;
6804 }
6805 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6806 /* srs */
81465888 6807 if (IS_USER(s)) {
9ee6e8bb 6808 goto illegal_op;
9ee6e8bb 6809 }
81465888
PM
6810 ARCH(6);
6811 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 6812 return;
ea825eee 6813 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6814 /* rfe */
c67b6b71 6815 int32_t offset;
9ee6e8bb
PB
6816 if (IS_USER(s))
6817 goto illegal_op;
6818 ARCH(6);
6819 rn = (insn >> 16) & 0xf;
b0109805 6820 addr = load_reg(s, rn);
9ee6e8bb
PB
6821 i = (insn >> 23) & 3;
6822 switch (i) {
b0109805 6823 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6824 case 1: offset = 0; break; /* IA */
6825 case 2: offset = -8; break; /* DB */
b0109805 6826 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6827 default: abort();
6828 }
6829 if (offset)
b0109805
PB
6830 tcg_gen_addi_i32(addr, addr, offset);
6831 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 6832 tmp = tcg_temp_new_i32();
08307563 6833 gen_aa32_ld32u(tmp, addr, 0);
b0109805 6834 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 6835 tmp2 = tcg_temp_new_i32();
08307563 6836 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
6837 if (insn & (1 << 21)) {
6838 /* Base writeback. */
6839 switch (i) {
b0109805 6840 case 0: offset = -8; break;
c67b6b71
FN
6841 case 1: offset = 4; break;
6842 case 2: offset = -4; break;
b0109805 6843 case 3: offset = 0; break;
9ee6e8bb
PB
6844 default: abort();
6845 }
6846 if (offset)
b0109805
PB
6847 tcg_gen_addi_i32(addr, addr, offset);
6848 store_reg(s, rn, addr);
6849 } else {
7d1b0095 6850 tcg_temp_free_i32(addr);
9ee6e8bb 6851 }
b0109805 6852 gen_rfe(s, tmp, tmp2);
c67b6b71 6853 return;
9ee6e8bb
PB
6854 } else if ((insn & 0x0e000000) == 0x0a000000) {
6855 /* branch link and change to thumb (blx <offset>) */
6856 int32_t offset;
6857
6858 val = (uint32_t)s->pc;
7d1b0095 6859 tmp = tcg_temp_new_i32();
d9ba4830
PB
6860 tcg_gen_movi_i32(tmp, val);
6861 store_reg(s, 14, tmp);
9ee6e8bb
PB
6862 /* Sign-extend the 24-bit offset */
6863 offset = (((int32_t)insn) << 8) >> 8;
6864 /* offset * 4 + bit24 * 2 + (thumb bit) */
6865 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6866 /* pipeline offset */
6867 val += 4;
be5e7a76 6868 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6869 gen_bx_im(s, val);
9ee6e8bb
PB
6870 return;
6871 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6872 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6873 /* iWMMXt register transfer. */
6874 if (env->cp15.c15_cpar & (1 << 1))
6875 if (!disas_iwmmxt_insn(env, s, insn))
6876 return;
6877 }
6878 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6879 /* Coprocessor double register transfer. */
be5e7a76 6880 ARCH(5TE);
9ee6e8bb
PB
6881 } else if ((insn & 0x0f000010) == 0x0e000010) {
6882 /* Additional coprocessor register transfer. */
7997d92f 6883 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6884 uint32_t mask;
6885 uint32_t val;
6886 /* cps (privileged) */
6887 if (IS_USER(s))
6888 return;
6889 mask = val = 0;
6890 if (insn & (1 << 19)) {
6891 if (insn & (1 << 8))
6892 mask |= CPSR_A;
6893 if (insn & (1 << 7))
6894 mask |= CPSR_I;
6895 if (insn & (1 << 6))
6896 mask |= CPSR_F;
6897 if (insn & (1 << 18))
6898 val |= mask;
6899 }
7997d92f 6900 if (insn & (1 << 17)) {
9ee6e8bb
PB
6901 mask |= CPSR_M;
6902 val |= (insn & 0x1f);
6903 }
6904 if (mask) {
2fbac54b 6905 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6906 }
6907 return;
6908 }
6909 goto illegal_op;
6910 }
6911 if (cond != 0xe) {
6912 /* if not always execute, we generate a conditional jump to
6913 next instruction */
6914 s->condlabel = gen_new_label();
d9ba4830 6915 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6916 s->condjmp = 1;
6917 }
6918 if ((insn & 0x0f900000) == 0x03000000) {
6919 if ((insn & (1 << 21)) == 0) {
6920 ARCH(6T2);
6921 rd = (insn >> 12) & 0xf;
6922 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6923 if ((insn & (1 << 22)) == 0) {
6924 /* MOVW */
7d1b0095 6925 tmp = tcg_temp_new_i32();
5e3f878a 6926 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6927 } else {
6928 /* MOVT */
5e3f878a 6929 tmp = load_reg(s, rd);
86831435 6930 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6931 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6932 }
5e3f878a 6933 store_reg(s, rd, tmp);
9ee6e8bb
PB
6934 } else {
6935 if (((insn >> 12) & 0xf) != 0xf)
6936 goto illegal_op;
6937 if (((insn >> 16) & 0xf) == 0) {
6938 gen_nop_hint(s, insn & 0xff);
6939 } else {
6940 /* CPSR = immediate */
6941 val = insn & 0xff;
6942 shift = ((insn >> 8) & 0xf) * 2;
6943 if (shift)
6944 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6945 i = ((insn & (1 << 22)) != 0);
2fbac54b 6946 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6947 goto illegal_op;
6948 }
6949 }
6950 } else if ((insn & 0x0f900000) == 0x01000000
6951 && (insn & 0x00000090) != 0x00000090) {
6952 /* miscellaneous instructions */
6953 op1 = (insn >> 21) & 3;
6954 sh = (insn >> 4) & 0xf;
6955 rm = insn & 0xf;
6956 switch (sh) {
6957 case 0x0: /* move program status register */
6958 if (op1 & 1) {
6959 /* PSR = reg */
2fbac54b 6960 tmp = load_reg(s, rm);
9ee6e8bb 6961 i = ((op1 & 2) != 0);
2fbac54b 6962 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6963 goto illegal_op;
6964 } else {
6965 /* reg = PSR */
6966 rd = (insn >> 12) & 0xf;
6967 if (op1 & 2) {
6968 if (IS_USER(s))
6969 goto illegal_op;
d9ba4830 6970 tmp = load_cpu_field(spsr);
9ee6e8bb 6971 } else {
7d1b0095 6972 tmp = tcg_temp_new_i32();
9ef39277 6973 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6974 }
d9ba4830 6975 store_reg(s, rd, tmp);
9ee6e8bb
PB
6976 }
6977 break;
6978 case 0x1:
6979 if (op1 == 1) {
6980 /* branch/exchange thumb (bx). */
be5e7a76 6981 ARCH(4T);
d9ba4830
PB
6982 tmp = load_reg(s, rm);
6983 gen_bx(s, tmp);
9ee6e8bb
PB
6984 } else if (op1 == 3) {
6985 /* clz */
be5e7a76 6986 ARCH(5);
9ee6e8bb 6987 rd = (insn >> 12) & 0xf;
1497c961
PB
6988 tmp = load_reg(s, rm);
6989 gen_helper_clz(tmp, tmp);
6990 store_reg(s, rd, tmp);
9ee6e8bb
PB
6991 } else {
6992 goto illegal_op;
6993 }
6994 break;
6995 case 0x2:
6996 if (op1 == 1) {
6997 ARCH(5J); /* bxj */
6998 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6999 tmp = load_reg(s, rm);
7000 gen_bx(s, tmp);
9ee6e8bb
PB
7001 } else {
7002 goto illegal_op;
7003 }
7004 break;
7005 case 0x3:
7006 if (op1 != 1)
7007 goto illegal_op;
7008
be5e7a76 7009 ARCH(5);
9ee6e8bb 7010 /* branch link/exchange thumb (blx) */
d9ba4830 7011 tmp = load_reg(s, rm);
7d1b0095 7012 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7013 tcg_gen_movi_i32(tmp2, s->pc);
7014 store_reg(s, 14, tmp2);
7015 gen_bx(s, tmp);
9ee6e8bb
PB
7016 break;
7017 case 0x5: /* saturating add/subtract */
be5e7a76 7018 ARCH(5TE);
9ee6e8bb
PB
7019 rd = (insn >> 12) & 0xf;
7020 rn = (insn >> 16) & 0xf;
b40d0353 7021 tmp = load_reg(s, rm);
5e3f878a 7022 tmp2 = load_reg(s, rn);
9ee6e8bb 7023 if (op1 & 2)
9ef39277 7024 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7025 if (op1 & 1)
9ef39277 7026 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7027 else
9ef39277 7028 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7029 tcg_temp_free_i32(tmp2);
5e3f878a 7030 store_reg(s, rd, tmp);
9ee6e8bb 7031 break;
49e14940
AL
7032 case 7:
7033 /* SMC instruction (op1 == 3)
7034 and undefined instructions (op1 == 0 || op1 == 2)
7035 will trap */
7036 if (op1 != 1) {
7037 goto illegal_op;
7038 }
7039 /* bkpt */
be5e7a76 7040 ARCH(5);
bc4a0de0 7041 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7042 break;
7043 case 0x8: /* signed multiply */
7044 case 0xa:
7045 case 0xc:
7046 case 0xe:
be5e7a76 7047 ARCH(5TE);
9ee6e8bb
PB
7048 rs = (insn >> 8) & 0xf;
7049 rn = (insn >> 12) & 0xf;
7050 rd = (insn >> 16) & 0xf;
7051 if (op1 == 1) {
7052 /* (32 * 16) >> 16 */
5e3f878a
PB
7053 tmp = load_reg(s, rm);
7054 tmp2 = load_reg(s, rs);
9ee6e8bb 7055 if (sh & 4)
5e3f878a 7056 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7057 else
5e3f878a 7058 gen_sxth(tmp2);
a7812ae4
PB
7059 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7060 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7061 tmp = tcg_temp_new_i32();
a7812ae4 7062 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7063 tcg_temp_free_i64(tmp64);
9ee6e8bb 7064 if ((sh & 2) == 0) {
5e3f878a 7065 tmp2 = load_reg(s, rn);
9ef39277 7066 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7067 tcg_temp_free_i32(tmp2);
9ee6e8bb 7068 }
5e3f878a 7069 store_reg(s, rd, tmp);
9ee6e8bb
PB
7070 } else {
7071 /* 16 * 16 */
5e3f878a
PB
7072 tmp = load_reg(s, rm);
7073 tmp2 = load_reg(s, rs);
7074 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7075 tcg_temp_free_i32(tmp2);
9ee6e8bb 7076 if (op1 == 2) {
a7812ae4
PB
7077 tmp64 = tcg_temp_new_i64();
7078 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7079 tcg_temp_free_i32(tmp);
a7812ae4
PB
7080 gen_addq(s, tmp64, rn, rd);
7081 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7082 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7083 } else {
7084 if (op1 == 0) {
5e3f878a 7085 tmp2 = load_reg(s, rn);
9ef39277 7086 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7087 tcg_temp_free_i32(tmp2);
9ee6e8bb 7088 }
5e3f878a 7089 store_reg(s, rd, tmp);
9ee6e8bb
PB
7090 }
7091 }
7092 break;
7093 default:
7094 goto illegal_op;
7095 }
7096 } else if (((insn & 0x0e000000) == 0 &&
7097 (insn & 0x00000090) != 0x90) ||
7098 ((insn & 0x0e000000) == (1 << 25))) {
7099 int set_cc, logic_cc, shiftop;
7100
7101 op1 = (insn >> 21) & 0xf;
7102 set_cc = (insn >> 20) & 1;
7103 logic_cc = table_logic_cc[op1] & set_cc;
7104
7105 /* data processing instruction */
7106 if (insn & (1 << 25)) {
7107 /* immediate operand */
7108 val = insn & 0xff;
7109 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7110 if (shift) {
9ee6e8bb 7111 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7112 }
7d1b0095 7113 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7114 tcg_gen_movi_i32(tmp2, val);
7115 if (logic_cc && shift) {
7116 gen_set_CF_bit31(tmp2);
7117 }
9ee6e8bb
PB
7118 } else {
7119 /* register */
7120 rm = (insn) & 0xf;
e9bb4aa9 7121 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7122 shiftop = (insn >> 5) & 3;
7123 if (!(insn & (1 << 4))) {
7124 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7125 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7126 } else {
7127 rs = (insn >> 8) & 0xf;
8984bd2e 7128 tmp = load_reg(s, rs);
e9bb4aa9 7129 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7130 }
7131 }
7132 if (op1 != 0x0f && op1 != 0x0d) {
7133 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7134 tmp = load_reg(s, rn);
7135 } else {
39d5492a 7136 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7137 }
7138 rd = (insn >> 12) & 0xf;
7139 switch(op1) {
7140 case 0x00:
e9bb4aa9
JR
7141 tcg_gen_and_i32(tmp, tmp, tmp2);
7142 if (logic_cc) {
7143 gen_logic_CC(tmp);
7144 }
21aeb343 7145 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7146 break;
7147 case 0x01:
e9bb4aa9
JR
7148 tcg_gen_xor_i32(tmp, tmp, tmp2);
7149 if (logic_cc) {
7150 gen_logic_CC(tmp);
7151 }
21aeb343 7152 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7153 break;
7154 case 0x02:
7155 if (set_cc && rd == 15) {
7156 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7157 if (IS_USER(s)) {
9ee6e8bb 7158 goto illegal_op;
e9bb4aa9 7159 }
72485ec4 7160 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7161 gen_exception_return(s, tmp);
9ee6e8bb 7162 } else {
e9bb4aa9 7163 if (set_cc) {
72485ec4 7164 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7165 } else {
7166 tcg_gen_sub_i32(tmp, tmp, tmp2);
7167 }
21aeb343 7168 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7169 }
7170 break;
7171 case 0x03:
e9bb4aa9 7172 if (set_cc) {
72485ec4 7173 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7174 } else {
7175 tcg_gen_sub_i32(tmp, tmp2, tmp);
7176 }
21aeb343 7177 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7178 break;
7179 case 0x04:
e9bb4aa9 7180 if (set_cc) {
72485ec4 7181 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7182 } else {
7183 tcg_gen_add_i32(tmp, tmp, tmp2);
7184 }
21aeb343 7185 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7186 break;
7187 case 0x05:
e9bb4aa9 7188 if (set_cc) {
49b4c31e 7189 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7190 } else {
7191 gen_add_carry(tmp, tmp, tmp2);
7192 }
21aeb343 7193 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7194 break;
7195 case 0x06:
e9bb4aa9 7196 if (set_cc) {
2de68a49 7197 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7198 } else {
7199 gen_sub_carry(tmp, tmp, tmp2);
7200 }
21aeb343 7201 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7202 break;
7203 case 0x07:
e9bb4aa9 7204 if (set_cc) {
2de68a49 7205 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7206 } else {
7207 gen_sub_carry(tmp, tmp2, tmp);
7208 }
21aeb343 7209 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7210 break;
7211 case 0x08:
7212 if (set_cc) {
e9bb4aa9
JR
7213 tcg_gen_and_i32(tmp, tmp, tmp2);
7214 gen_logic_CC(tmp);
9ee6e8bb 7215 }
7d1b0095 7216 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7217 break;
7218 case 0x09:
7219 if (set_cc) {
e9bb4aa9
JR
7220 tcg_gen_xor_i32(tmp, tmp, tmp2);
7221 gen_logic_CC(tmp);
9ee6e8bb 7222 }
7d1b0095 7223 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7224 break;
7225 case 0x0a:
7226 if (set_cc) {
72485ec4 7227 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7228 }
7d1b0095 7229 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7230 break;
7231 case 0x0b:
7232 if (set_cc) {
72485ec4 7233 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7234 }
7d1b0095 7235 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7236 break;
7237 case 0x0c:
e9bb4aa9
JR
7238 tcg_gen_or_i32(tmp, tmp, tmp2);
7239 if (logic_cc) {
7240 gen_logic_CC(tmp);
7241 }
21aeb343 7242 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7243 break;
7244 case 0x0d:
7245 if (logic_cc && rd == 15) {
7246 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7247 if (IS_USER(s)) {
9ee6e8bb 7248 goto illegal_op;
e9bb4aa9
JR
7249 }
7250 gen_exception_return(s, tmp2);
9ee6e8bb 7251 } else {
e9bb4aa9
JR
7252 if (logic_cc) {
7253 gen_logic_CC(tmp2);
7254 }
21aeb343 7255 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7256 }
7257 break;
7258 case 0x0e:
f669df27 7259 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7260 if (logic_cc) {
7261 gen_logic_CC(tmp);
7262 }
21aeb343 7263 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7264 break;
7265 default:
7266 case 0x0f:
e9bb4aa9
JR
7267 tcg_gen_not_i32(tmp2, tmp2);
7268 if (logic_cc) {
7269 gen_logic_CC(tmp2);
7270 }
21aeb343 7271 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7272 break;
7273 }
e9bb4aa9 7274 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7275 tcg_temp_free_i32(tmp2);
e9bb4aa9 7276 }
9ee6e8bb
PB
7277 } else {
7278 /* other instructions */
7279 op1 = (insn >> 24) & 0xf;
7280 switch(op1) {
7281 case 0x0:
7282 case 0x1:
7283 /* multiplies, extra load/stores */
7284 sh = (insn >> 5) & 3;
7285 if (sh == 0) {
7286 if (op1 == 0x0) {
7287 rd = (insn >> 16) & 0xf;
7288 rn = (insn >> 12) & 0xf;
7289 rs = (insn >> 8) & 0xf;
7290 rm = (insn) & 0xf;
7291 op1 = (insn >> 20) & 0xf;
7292 switch (op1) {
7293 case 0: case 1: case 2: case 3: case 6:
7294 /* 32 bit mul */
5e3f878a
PB
7295 tmp = load_reg(s, rs);
7296 tmp2 = load_reg(s, rm);
7297 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7298 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7299 if (insn & (1 << 22)) {
7300 /* Subtract (mls) */
7301 ARCH(6T2);
5e3f878a
PB
7302 tmp2 = load_reg(s, rn);
7303 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7304 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7305 } else if (insn & (1 << 21)) {
7306 /* Add */
5e3f878a
PB
7307 tmp2 = load_reg(s, rn);
7308 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7309 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7310 }
7311 if (insn & (1 << 20))
5e3f878a
PB
7312 gen_logic_CC(tmp);
7313 store_reg(s, rd, tmp);
9ee6e8bb 7314 break;
8aac08b1
AJ
7315 case 4:
7316 /* 64 bit mul double accumulate (UMAAL) */
7317 ARCH(6);
7318 tmp = load_reg(s, rs);
7319 tmp2 = load_reg(s, rm);
7320 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7321 gen_addq_lo(s, tmp64, rn);
7322 gen_addq_lo(s, tmp64, rd);
7323 gen_storeq_reg(s, rn, rd, tmp64);
7324 tcg_temp_free_i64(tmp64);
7325 break;
7326 case 8: case 9: case 10: case 11:
7327 case 12: case 13: case 14: case 15:
7328 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7329 tmp = load_reg(s, rs);
7330 tmp2 = load_reg(s, rm);
8aac08b1 7331 if (insn & (1 << 22)) {
c9f10124 7332 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7333 } else {
c9f10124 7334 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7335 }
7336 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7337 TCGv_i32 al = load_reg(s, rn);
7338 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7339 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7340 tcg_temp_free_i32(al);
7341 tcg_temp_free_i32(ah);
9ee6e8bb 7342 }
8aac08b1 7343 if (insn & (1 << 20)) {
c9f10124 7344 gen_logicq_cc(tmp, tmp2);
8aac08b1 7345 }
c9f10124
RH
7346 store_reg(s, rn, tmp);
7347 store_reg(s, rd, tmp2);
9ee6e8bb 7348 break;
8aac08b1
AJ
7349 default:
7350 goto illegal_op;
9ee6e8bb
PB
7351 }
7352 } else {
7353 rn = (insn >> 16) & 0xf;
7354 rd = (insn >> 12) & 0xf;
7355 if (insn & (1 << 23)) {
7356 /* load/store exclusive */
2359bf80 7357 int op2 = (insn >> 8) & 3;
86753403 7358 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7359
7360 switch (op2) {
7361 case 0: /* lda/stl */
7362 if (op1 == 1) {
7363 goto illegal_op;
7364 }
7365 ARCH(8);
7366 break;
7367 case 1: /* reserved */
7368 goto illegal_op;
7369 case 2: /* ldaex/stlex */
7370 ARCH(8);
7371 break;
7372 case 3: /* ldrex/strex */
7373 if (op1) {
7374 ARCH(6K);
7375 } else {
7376 ARCH(6);
7377 }
7378 break;
7379 }
7380
3174f8e9 7381 addr = tcg_temp_local_new_i32();
98a46317 7382 load_reg_var(s, addr, rn);
2359bf80
MR
7383
7384 /* Since the emulation does not have barriers,
7385 the acquire/release semantics need no special
7386 handling */
7387 if (op2 == 0) {
7388 if (insn & (1 << 20)) {
7389 tmp = tcg_temp_new_i32();
7390 switch (op1) {
7391 case 0: /* lda */
08307563 7392 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
7393 break;
7394 case 2: /* ldab */
08307563 7395 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
7396 break;
7397 case 3: /* ldah */
08307563 7398 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
7399 break;
7400 default:
7401 abort();
7402 }
7403 store_reg(s, rd, tmp);
7404 } else {
7405 rm = insn & 0xf;
7406 tmp = load_reg(s, rm);
7407 switch (op1) {
7408 case 0: /* stl */
08307563 7409 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
7410 break;
7411 case 2: /* stlb */
08307563 7412 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
7413 break;
7414 case 3: /* stlh */
08307563 7415 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
7416 break;
7417 default:
7418 abort();
7419 }
7420 tcg_temp_free_i32(tmp);
7421 }
7422 } else if (insn & (1 << 20)) {
86753403
PB
7423 switch (op1) {
7424 case 0: /* ldrex */
426f5abc 7425 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7426 break;
7427 case 1: /* ldrexd */
426f5abc 7428 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7429 break;
7430 case 2: /* ldrexb */
426f5abc 7431 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7432 break;
7433 case 3: /* ldrexh */
426f5abc 7434 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7435 break;
7436 default:
7437 abort();
7438 }
9ee6e8bb
PB
7439 } else {
7440 rm = insn & 0xf;
86753403
PB
7441 switch (op1) {
7442 case 0: /* strex */
426f5abc 7443 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7444 break;
7445 case 1: /* strexd */
502e64fe 7446 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7447 break;
7448 case 2: /* strexb */
426f5abc 7449 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7450 break;
7451 case 3: /* strexh */
426f5abc 7452 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7453 break;
7454 default:
7455 abort();
7456 }
9ee6e8bb 7457 }
39d5492a 7458 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7459 } else {
7460 /* SWP instruction */
7461 rm = (insn) & 0xf;
7462
8984bd2e
PB
7463 /* ??? This is not really atomic. However we know
7464 we never have multiple CPUs running in parallel,
7465 so it is good enough. */
7466 addr = load_reg(s, rn);
7467 tmp = load_reg(s, rm);
5a839c0d 7468 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7469 if (insn & (1 << 22)) {
08307563
PM
7470 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
7471 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7472 } else {
08307563
PM
7473 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
7474 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7475 }
5a839c0d 7476 tcg_temp_free_i32(tmp);
7d1b0095 7477 tcg_temp_free_i32(addr);
8984bd2e 7478 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7479 }
7480 }
7481 } else {
7482 int address_offset;
7483 int load;
7484 /* Misc load/store */
7485 rn = (insn >> 16) & 0xf;
7486 rd = (insn >> 12) & 0xf;
b0109805 7487 addr = load_reg(s, rn);
9ee6e8bb 7488 if (insn & (1 << 24))
b0109805 7489 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7490 address_offset = 0;
7491 if (insn & (1 << 20)) {
7492 /* load */
5a839c0d 7493 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7494 switch(sh) {
7495 case 1:
08307563 7496 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7497 break;
7498 case 2:
08307563 7499 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7500 break;
7501 default:
7502 case 3:
08307563 7503 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7504 break;
7505 }
7506 load = 1;
7507 } else if (sh & 2) {
be5e7a76 7508 ARCH(5TE);
9ee6e8bb
PB
7509 /* doubleword */
7510 if (sh & 1) {
7511 /* store */
b0109805 7512 tmp = load_reg(s, rd);
08307563 7513 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7514 tcg_temp_free_i32(tmp);
b0109805
PB
7515 tcg_gen_addi_i32(addr, addr, 4);
7516 tmp = load_reg(s, rd + 1);
08307563 7517 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7518 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7519 load = 0;
7520 } else {
7521 /* load */
5a839c0d 7522 tmp = tcg_temp_new_i32();
08307563 7523 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7524 store_reg(s, rd, tmp);
7525 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7526 tmp = tcg_temp_new_i32();
08307563 7527 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7528 rd++;
7529 load = 1;
7530 }
7531 address_offset = -4;
7532 } else {
7533 /* store */
b0109805 7534 tmp = load_reg(s, rd);
08307563 7535 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 7536 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7537 load = 0;
7538 }
7539 /* Perform base writeback before the loaded value to
7540 ensure correct behavior with overlapping index registers.
7541 ldrd with base writeback is is undefined if the
7542 destination and index registers overlap. */
7543 if (!(insn & (1 << 24))) {
b0109805
PB
7544 gen_add_datah_offset(s, insn, address_offset, addr);
7545 store_reg(s, rn, addr);
9ee6e8bb
PB
7546 } else if (insn & (1 << 21)) {
7547 if (address_offset)
b0109805
PB
7548 tcg_gen_addi_i32(addr, addr, address_offset);
7549 store_reg(s, rn, addr);
7550 } else {
7d1b0095 7551 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7552 }
7553 if (load) {
7554 /* Complete the load. */
b0109805 7555 store_reg(s, rd, tmp);
9ee6e8bb
PB
7556 }
7557 }
7558 break;
7559 case 0x4:
7560 case 0x5:
7561 goto do_ldst;
7562 case 0x6:
7563 case 0x7:
7564 if (insn & (1 << 4)) {
7565 ARCH(6);
7566 /* Armv6 Media instructions. */
7567 rm = insn & 0xf;
7568 rn = (insn >> 16) & 0xf;
2c0262af 7569 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7570 rs = (insn >> 8) & 0xf;
7571 switch ((insn >> 23) & 3) {
7572 case 0: /* Parallel add/subtract. */
7573 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7574 tmp = load_reg(s, rn);
7575 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7576 sh = (insn >> 5) & 7;
7577 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7578 goto illegal_op;
6ddbc6e4 7579 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7580 tcg_temp_free_i32(tmp2);
6ddbc6e4 7581 store_reg(s, rd, tmp);
9ee6e8bb
PB
7582 break;
7583 case 1:
7584 if ((insn & 0x00700020) == 0) {
6c95676b 7585 /* Halfword pack. */
3670669c
PB
7586 tmp = load_reg(s, rn);
7587 tmp2 = load_reg(s, rm);
9ee6e8bb 7588 shift = (insn >> 7) & 0x1f;
3670669c
PB
7589 if (insn & (1 << 6)) {
7590 /* pkhtb */
22478e79
AZ
7591 if (shift == 0)
7592 shift = 31;
7593 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7594 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7595 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7596 } else {
7597 /* pkhbt */
22478e79
AZ
7598 if (shift)
7599 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7600 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7601 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7602 }
7603 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7604 tcg_temp_free_i32(tmp2);
3670669c 7605 store_reg(s, rd, tmp);
9ee6e8bb
PB
7606 } else if ((insn & 0x00200020) == 0x00200000) {
7607 /* [us]sat */
6ddbc6e4 7608 tmp = load_reg(s, rm);
9ee6e8bb
PB
7609 shift = (insn >> 7) & 0x1f;
7610 if (insn & (1 << 6)) {
7611 if (shift == 0)
7612 shift = 31;
6ddbc6e4 7613 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7614 } else {
6ddbc6e4 7615 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7616 }
7617 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7618 tmp2 = tcg_const_i32(sh);
7619 if (insn & (1 << 22))
9ef39277 7620 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7621 else
9ef39277 7622 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7623 tcg_temp_free_i32(tmp2);
6ddbc6e4 7624 store_reg(s, rd, tmp);
9ee6e8bb
PB
7625 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7626 /* [us]sat16 */
6ddbc6e4 7627 tmp = load_reg(s, rm);
9ee6e8bb 7628 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7629 tmp2 = tcg_const_i32(sh);
7630 if (insn & (1 << 22))
9ef39277 7631 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7632 else
9ef39277 7633 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7634 tcg_temp_free_i32(tmp2);
6ddbc6e4 7635 store_reg(s, rd, tmp);
9ee6e8bb
PB
7636 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7637 /* Select bytes. */
6ddbc6e4
PB
7638 tmp = load_reg(s, rn);
7639 tmp2 = load_reg(s, rm);
7d1b0095 7640 tmp3 = tcg_temp_new_i32();
0ecb72a5 7641 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7642 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7643 tcg_temp_free_i32(tmp3);
7644 tcg_temp_free_i32(tmp2);
6ddbc6e4 7645 store_reg(s, rd, tmp);
9ee6e8bb 7646 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7647 tmp = load_reg(s, rm);
9ee6e8bb 7648 shift = (insn >> 10) & 3;
1301f322 7649 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7650 rotate, a shift is sufficient. */
7651 if (shift != 0)
f669df27 7652 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7653 op1 = (insn >> 20) & 7;
7654 switch (op1) {
5e3f878a
PB
7655 case 0: gen_sxtb16(tmp); break;
7656 case 2: gen_sxtb(tmp); break;
7657 case 3: gen_sxth(tmp); break;
7658 case 4: gen_uxtb16(tmp); break;
7659 case 6: gen_uxtb(tmp); break;
7660 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7661 default: goto illegal_op;
7662 }
7663 if (rn != 15) {
5e3f878a 7664 tmp2 = load_reg(s, rn);
9ee6e8bb 7665 if ((op1 & 3) == 0) {
5e3f878a 7666 gen_add16(tmp, tmp2);
9ee6e8bb 7667 } else {
5e3f878a 7668 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7669 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7670 }
7671 }
6c95676b 7672 store_reg(s, rd, tmp);
9ee6e8bb
PB
7673 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7674 /* rev */
b0109805 7675 tmp = load_reg(s, rm);
9ee6e8bb
PB
7676 if (insn & (1 << 22)) {
7677 if (insn & (1 << 7)) {
b0109805 7678 gen_revsh(tmp);
9ee6e8bb
PB
7679 } else {
7680 ARCH(6T2);
b0109805 7681 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7682 }
7683 } else {
7684 if (insn & (1 << 7))
b0109805 7685 gen_rev16(tmp);
9ee6e8bb 7686 else
66896cb8 7687 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7688 }
b0109805 7689 store_reg(s, rd, tmp);
9ee6e8bb
PB
7690 } else {
7691 goto illegal_op;
7692 }
7693 break;
7694 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7695 switch ((insn >> 20) & 0x7) {
7696 case 5:
7697 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7698 /* op2 not 00x or 11x : UNDEF */
7699 goto illegal_op;
7700 }
838fa72d
AJ
7701 /* Signed multiply most significant [accumulate].
7702 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7703 tmp = load_reg(s, rm);
7704 tmp2 = load_reg(s, rs);
a7812ae4 7705 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7706
955a7dd5 7707 if (rd != 15) {
838fa72d 7708 tmp = load_reg(s, rd);
9ee6e8bb 7709 if (insn & (1 << 6)) {
838fa72d 7710 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7711 } else {
838fa72d 7712 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7713 }
7714 }
838fa72d
AJ
7715 if (insn & (1 << 5)) {
7716 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7717 }
7718 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7719 tmp = tcg_temp_new_i32();
838fa72d
AJ
7720 tcg_gen_trunc_i64_i32(tmp, tmp64);
7721 tcg_temp_free_i64(tmp64);
955a7dd5 7722 store_reg(s, rn, tmp);
41e9564d
PM
7723 break;
7724 case 0:
7725 case 4:
7726 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7727 if (insn & (1 << 7)) {
7728 goto illegal_op;
7729 }
7730 tmp = load_reg(s, rm);
7731 tmp2 = load_reg(s, rs);
9ee6e8bb 7732 if (insn & (1 << 5))
5e3f878a
PB
7733 gen_swap_half(tmp2);
7734 gen_smul_dual(tmp, tmp2);
5e3f878a 7735 if (insn & (1 << 6)) {
e1d177b9 7736 /* This subtraction cannot overflow. */
5e3f878a
PB
7737 tcg_gen_sub_i32(tmp, tmp, tmp2);
7738 } else {
e1d177b9
PM
7739 /* This addition cannot overflow 32 bits;
7740 * however it may overflow considered as a signed
7741 * operation, in which case we must set the Q flag.
7742 */
9ef39277 7743 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7744 }
7d1b0095 7745 tcg_temp_free_i32(tmp2);
9ee6e8bb 7746 if (insn & (1 << 22)) {
5e3f878a 7747 /* smlald, smlsld */
a7812ae4
PB
7748 tmp64 = tcg_temp_new_i64();
7749 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7750 tcg_temp_free_i32(tmp);
a7812ae4
PB
7751 gen_addq(s, tmp64, rd, rn);
7752 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7753 tcg_temp_free_i64(tmp64);
9ee6e8bb 7754 } else {
5e3f878a 7755 /* smuad, smusd, smlad, smlsd */
22478e79 7756 if (rd != 15)
9ee6e8bb 7757 {
22478e79 7758 tmp2 = load_reg(s, rd);
9ef39277 7759 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7760 tcg_temp_free_i32(tmp2);
9ee6e8bb 7761 }
22478e79 7762 store_reg(s, rn, tmp);
9ee6e8bb 7763 }
41e9564d 7764 break;
b8b8ea05
PM
7765 case 1:
7766 case 3:
7767 /* SDIV, UDIV */
7768 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7769 goto illegal_op;
7770 }
7771 if (((insn >> 5) & 7) || (rd != 15)) {
7772 goto illegal_op;
7773 }
7774 tmp = load_reg(s, rm);
7775 tmp2 = load_reg(s, rs);
7776 if (insn & (1 << 21)) {
7777 gen_helper_udiv(tmp, tmp, tmp2);
7778 } else {
7779 gen_helper_sdiv(tmp, tmp, tmp2);
7780 }
7781 tcg_temp_free_i32(tmp2);
7782 store_reg(s, rn, tmp);
7783 break;
41e9564d
PM
7784 default:
7785 goto illegal_op;
9ee6e8bb
PB
7786 }
7787 break;
7788 case 3:
7789 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7790 switch (op1) {
7791 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7792 ARCH(6);
7793 tmp = load_reg(s, rm);
7794 tmp2 = load_reg(s, rs);
7795 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7796 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7797 if (rd != 15) {
7798 tmp2 = load_reg(s, rd);
6ddbc6e4 7799 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7800 tcg_temp_free_i32(tmp2);
9ee6e8bb 7801 }
ded9d295 7802 store_reg(s, rn, tmp);
9ee6e8bb
PB
7803 break;
7804 case 0x20: case 0x24: case 0x28: case 0x2c:
7805 /* Bitfield insert/clear. */
7806 ARCH(6T2);
7807 shift = (insn >> 7) & 0x1f;
7808 i = (insn >> 16) & 0x1f;
7809 i = i + 1 - shift;
7810 if (rm == 15) {
7d1b0095 7811 tmp = tcg_temp_new_i32();
5e3f878a 7812 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7813 } else {
5e3f878a 7814 tmp = load_reg(s, rm);
9ee6e8bb
PB
7815 }
7816 if (i != 32) {
5e3f878a 7817 tmp2 = load_reg(s, rd);
d593c48e 7818 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7819 tcg_temp_free_i32(tmp2);
9ee6e8bb 7820 }
5e3f878a 7821 store_reg(s, rd, tmp);
9ee6e8bb
PB
7822 break;
7823 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7824 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7825 ARCH(6T2);
5e3f878a 7826 tmp = load_reg(s, rm);
9ee6e8bb
PB
7827 shift = (insn >> 7) & 0x1f;
7828 i = ((insn >> 16) & 0x1f) + 1;
7829 if (shift + i > 32)
7830 goto illegal_op;
7831 if (i < 32) {
7832 if (op1 & 0x20) {
5e3f878a 7833 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7834 } else {
5e3f878a 7835 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7836 }
7837 }
5e3f878a 7838 store_reg(s, rd, tmp);
9ee6e8bb
PB
7839 break;
7840 default:
7841 goto illegal_op;
7842 }
7843 break;
7844 }
7845 break;
7846 }
7847 do_ldst:
7848 /* Check for undefined extension instructions
7849 * per the ARM Bible IE:
7850 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7851 */
7852 sh = (0xf << 20) | (0xf << 4);
7853 if (op1 == 0x7 && ((insn & sh) == sh))
7854 {
7855 goto illegal_op;
7856 }
7857 /* load/store byte/word */
7858 rn = (insn >> 16) & 0xf;
7859 rd = (insn >> 12) & 0xf;
b0109805 7860 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7861 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7862 if (insn & (1 << 24))
b0109805 7863 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7864 if (insn & (1 << 20)) {
7865 /* load */
5a839c0d 7866 tmp = tcg_temp_new_i32();
9ee6e8bb 7867 if (insn & (1 << 22)) {
08307563 7868 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 7869 } else {
08307563 7870 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 7871 }
9ee6e8bb
PB
7872 } else {
7873 /* store */
b0109805 7874 tmp = load_reg(s, rd);
5a839c0d 7875 if (insn & (1 << 22)) {
08307563 7876 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 7877 } else {
08307563 7878 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
7879 }
7880 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7881 }
7882 if (!(insn & (1 << 24))) {
b0109805
PB
7883 gen_add_data_offset(s, insn, tmp2);
7884 store_reg(s, rn, tmp2);
7885 } else if (insn & (1 << 21)) {
7886 store_reg(s, rn, tmp2);
7887 } else {
7d1b0095 7888 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7889 }
7890 if (insn & (1 << 20)) {
7891 /* Complete the load. */
be5e7a76 7892 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7893 }
7894 break;
7895 case 0x08:
7896 case 0x09:
7897 {
7898 int j, n, user, loaded_base;
39d5492a 7899 TCGv_i32 loaded_var;
9ee6e8bb
PB
7900 /* load/store multiple words */
7901 /* XXX: store correct base if write back */
7902 user = 0;
7903 if (insn & (1 << 22)) {
7904 if (IS_USER(s))
7905 goto illegal_op; /* only usable in supervisor mode */
7906
7907 if ((insn & (1 << 15)) == 0)
7908 user = 1;
7909 }
7910 rn = (insn >> 16) & 0xf;
b0109805 7911 addr = load_reg(s, rn);
9ee6e8bb
PB
7912
7913 /* compute total size */
7914 loaded_base = 0;
39d5492a 7915 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
7916 n = 0;
7917 for(i=0;i<16;i++) {
7918 if (insn & (1 << i))
7919 n++;
7920 }
7921 /* XXX: test invalid n == 0 case ? */
7922 if (insn & (1 << 23)) {
7923 if (insn & (1 << 24)) {
7924 /* pre increment */
b0109805 7925 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7926 } else {
7927 /* post increment */
7928 }
7929 } else {
7930 if (insn & (1 << 24)) {
7931 /* pre decrement */
b0109805 7932 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7933 } else {
7934 /* post decrement */
7935 if (n != 1)
b0109805 7936 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7937 }
7938 }
7939 j = 0;
7940 for(i=0;i<16;i++) {
7941 if (insn & (1 << i)) {
7942 if (insn & (1 << 20)) {
7943 /* load */
5a839c0d 7944 tmp = tcg_temp_new_i32();
08307563 7945 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 7946 if (user) {
b75263d6 7947 tmp2 = tcg_const_i32(i);
1ce94f81 7948 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7949 tcg_temp_free_i32(tmp2);
7d1b0095 7950 tcg_temp_free_i32(tmp);
9ee6e8bb 7951 } else if (i == rn) {
b0109805 7952 loaded_var = tmp;
9ee6e8bb
PB
7953 loaded_base = 1;
7954 } else {
be5e7a76 7955 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7956 }
7957 } else {
7958 /* store */
7959 if (i == 15) {
7960 /* special case: r15 = PC + 8 */
7961 val = (long)s->pc + 4;
7d1b0095 7962 tmp = tcg_temp_new_i32();
b0109805 7963 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7964 } else if (user) {
7d1b0095 7965 tmp = tcg_temp_new_i32();
b75263d6 7966 tmp2 = tcg_const_i32(i);
9ef39277 7967 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7968 tcg_temp_free_i32(tmp2);
9ee6e8bb 7969 } else {
b0109805 7970 tmp = load_reg(s, i);
9ee6e8bb 7971 }
08307563 7972 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7973 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7974 }
7975 j++;
7976 /* no need to add after the last transfer */
7977 if (j != n)
b0109805 7978 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7979 }
7980 }
7981 if (insn & (1 << 21)) {
7982 /* write back */
7983 if (insn & (1 << 23)) {
7984 if (insn & (1 << 24)) {
7985 /* pre increment */
7986 } else {
7987 /* post increment */
b0109805 7988 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7989 }
7990 } else {
7991 if (insn & (1 << 24)) {
7992 /* pre decrement */
7993 if (n != 1)
b0109805 7994 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7995 } else {
7996 /* post decrement */
b0109805 7997 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7998 }
7999 }
b0109805
PB
8000 store_reg(s, rn, addr);
8001 } else {
7d1b0095 8002 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8003 }
8004 if (loaded_base) {
b0109805 8005 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8006 }
8007 if ((insn & (1 << 22)) && !user) {
8008 /* Restore CPSR from SPSR. */
d9ba4830
PB
8009 tmp = load_cpu_field(spsr);
8010 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8011 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8012 s->is_jmp = DISAS_UPDATE;
8013 }
8014 }
8015 break;
8016 case 0xa:
8017 case 0xb:
8018 {
8019 int32_t offset;
8020
8021 /* branch (and link) */
8022 val = (int32_t)s->pc;
8023 if (insn & (1 << 24)) {
7d1b0095 8024 tmp = tcg_temp_new_i32();
5e3f878a
PB
8025 tcg_gen_movi_i32(tmp, val);
8026 store_reg(s, 14, tmp);
9ee6e8bb 8027 }
534df156
PM
8028 offset = sextract32(insn << 2, 0, 26);
8029 val += offset + 4;
9ee6e8bb
PB
8030 gen_jmp(s, val);
8031 }
8032 break;
8033 case 0xc:
8034 case 0xd:
8035 case 0xe:
8036 /* Coprocessor. */
8037 if (disas_coproc_insn(env, s, insn))
8038 goto illegal_op;
8039 break;
8040 case 0xf:
8041 /* swi */
eaed129d 8042 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
8043 s->is_jmp = DISAS_SWI;
8044 break;
8045 default:
8046 illegal_op:
bc4a0de0 8047 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
8048 break;
8049 }
8050 }
8051}
8052
8053/* Return true if this is a Thumb-2 logical op. */
8054static int
8055thumb2_logic_op(int op)
8056{
8057 return (op < 8);
8058}
8059
8060/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8061 then set condition code flags based on the result of the operation.
8062 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8063 to the high bit of T1.
8064 Returns zero if the opcode is valid. */
8065
8066static int
39d5492a
PM
8067gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8068 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8069{
8070 int logic_cc;
8071
8072 logic_cc = 0;
8073 switch (op) {
8074 case 0: /* and */
396e467c 8075 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8076 logic_cc = conds;
8077 break;
8078 case 1: /* bic */
f669df27 8079 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8080 logic_cc = conds;
8081 break;
8082 case 2: /* orr */
396e467c 8083 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8084 logic_cc = conds;
8085 break;
8086 case 3: /* orn */
29501f1b 8087 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8088 logic_cc = conds;
8089 break;
8090 case 4: /* eor */
396e467c 8091 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8092 logic_cc = conds;
8093 break;
8094 case 8: /* add */
8095 if (conds)
72485ec4 8096 gen_add_CC(t0, t0, t1);
9ee6e8bb 8097 else
396e467c 8098 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8099 break;
8100 case 10: /* adc */
8101 if (conds)
49b4c31e 8102 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8103 else
396e467c 8104 gen_adc(t0, t1);
9ee6e8bb
PB
8105 break;
8106 case 11: /* sbc */
2de68a49
RH
8107 if (conds) {
8108 gen_sbc_CC(t0, t0, t1);
8109 } else {
396e467c 8110 gen_sub_carry(t0, t0, t1);
2de68a49 8111 }
9ee6e8bb
PB
8112 break;
8113 case 13: /* sub */
8114 if (conds)
72485ec4 8115 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8116 else
396e467c 8117 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8118 break;
8119 case 14: /* rsb */
8120 if (conds)
72485ec4 8121 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8122 else
396e467c 8123 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8124 break;
8125 default: /* 5, 6, 7, 9, 12, 15. */
8126 return 1;
8127 }
8128 if (logic_cc) {
396e467c 8129 gen_logic_CC(t0);
9ee6e8bb 8130 if (shifter_out)
396e467c 8131 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8132 }
8133 return 0;
8134}
8135
8136/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8137 is not legal. */
0ecb72a5 8138static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8139{
b0109805 8140 uint32_t insn, imm, shift, offset;
9ee6e8bb 8141 uint32_t rd, rn, rm, rs;
39d5492a
PM
8142 TCGv_i32 tmp;
8143 TCGv_i32 tmp2;
8144 TCGv_i32 tmp3;
8145 TCGv_i32 addr;
a7812ae4 8146 TCGv_i64 tmp64;
9ee6e8bb
PB
8147 int op;
8148 int shiftop;
8149 int conds;
8150 int logic_cc;
8151
8152 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8153 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8154 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8155 16-bit instructions to get correct prefetch abort behavior. */
8156 insn = insn_hw1;
8157 if ((insn & (1 << 12)) == 0) {
be5e7a76 8158 ARCH(5);
9ee6e8bb
PB
8159 /* Second half of blx. */
8160 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8161 tmp = load_reg(s, 14);
8162 tcg_gen_addi_i32(tmp, tmp, offset);
8163 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8164
7d1b0095 8165 tmp2 = tcg_temp_new_i32();
b0109805 8166 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8167 store_reg(s, 14, tmp2);
8168 gen_bx(s, tmp);
9ee6e8bb
PB
8169 return 0;
8170 }
8171 if (insn & (1 << 11)) {
8172 /* Second half of bl. */
8173 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8174 tmp = load_reg(s, 14);
6a0d8a1d 8175 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8176
7d1b0095 8177 tmp2 = tcg_temp_new_i32();
b0109805 8178 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8179 store_reg(s, 14, tmp2);
8180 gen_bx(s, tmp);
9ee6e8bb
PB
8181 return 0;
8182 }
8183 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8184 /* Instruction spans a page boundary. Implement it as two
8185 16-bit instructions in case the second half causes an
8186 prefetch abort. */
8187 offset = ((int32_t)insn << 21) >> 9;
396e467c 8188 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8189 return 0;
8190 }
8191 /* Fall through to 32-bit decode. */
8192 }
8193
d31dd73e 8194 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8195 s->pc += 2;
8196 insn |= (uint32_t)insn_hw1 << 16;
8197
8198 if ((insn & 0xf800e800) != 0xf000e800) {
8199 ARCH(6T2);
8200 }
8201
8202 rn = (insn >> 16) & 0xf;
8203 rs = (insn >> 12) & 0xf;
8204 rd = (insn >> 8) & 0xf;
8205 rm = insn & 0xf;
8206 switch ((insn >> 25) & 0xf) {
8207 case 0: case 1: case 2: case 3:
8208 /* 16-bit instructions. Should never happen. */
8209 abort();
8210 case 4:
8211 if (insn & (1 << 22)) {
8212 /* Other load/store, table branch. */
8213 if (insn & 0x01200000) {
8214 /* Load/store doubleword. */
8215 if (rn == 15) {
7d1b0095 8216 addr = tcg_temp_new_i32();
b0109805 8217 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8218 } else {
b0109805 8219 addr = load_reg(s, rn);
9ee6e8bb
PB
8220 }
8221 offset = (insn & 0xff) * 4;
8222 if ((insn & (1 << 23)) == 0)
8223 offset = -offset;
8224 if (insn & (1 << 24)) {
b0109805 8225 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8226 offset = 0;
8227 }
8228 if (insn & (1 << 20)) {
8229 /* ldrd */
e2592fad 8230 tmp = tcg_temp_new_i32();
08307563 8231 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8232 store_reg(s, rs, tmp);
8233 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8234 tmp = tcg_temp_new_i32();
08307563 8235 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 8236 store_reg(s, rd, tmp);
9ee6e8bb
PB
8237 } else {
8238 /* strd */
b0109805 8239 tmp = load_reg(s, rs);
08307563 8240 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8241 tcg_temp_free_i32(tmp);
b0109805
PB
8242 tcg_gen_addi_i32(addr, addr, 4);
8243 tmp = load_reg(s, rd);
08307563 8244 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8245 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8246 }
8247 if (insn & (1 << 21)) {
8248 /* Base writeback. */
8249 if (rn == 15)
8250 goto illegal_op;
b0109805
PB
8251 tcg_gen_addi_i32(addr, addr, offset - 4);
8252 store_reg(s, rn, addr);
8253 } else {
7d1b0095 8254 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8255 }
8256 } else if ((insn & (1 << 23)) == 0) {
8257 /* Load/store exclusive word. */
39d5492a 8258 addr = tcg_temp_local_new_i32();
98a46317 8259 load_reg_var(s, addr, rn);
426f5abc 8260 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8261 if (insn & (1 << 20)) {
426f5abc 8262 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8263 } else {
426f5abc 8264 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8265 }
39d5492a 8266 tcg_temp_free_i32(addr);
2359bf80 8267 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8268 /* Table Branch. */
8269 if (rn == 15) {
7d1b0095 8270 addr = tcg_temp_new_i32();
b0109805 8271 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8272 } else {
b0109805 8273 addr = load_reg(s, rn);
9ee6e8bb 8274 }
b26eefb6 8275 tmp = load_reg(s, rm);
b0109805 8276 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8277 if (insn & (1 << 4)) {
8278 /* tbh */
b0109805 8279 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8280 tcg_temp_free_i32(tmp);
e2592fad 8281 tmp = tcg_temp_new_i32();
08307563 8282 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8283 } else { /* tbb */
7d1b0095 8284 tcg_temp_free_i32(tmp);
e2592fad 8285 tmp = tcg_temp_new_i32();
08307563 8286 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8287 }
7d1b0095 8288 tcg_temp_free_i32(addr);
b0109805
PB
8289 tcg_gen_shli_i32(tmp, tmp, 1);
8290 tcg_gen_addi_i32(tmp, tmp, s->pc);
8291 store_reg(s, 15, tmp);
9ee6e8bb 8292 } else {
2359bf80 8293 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8294 op = (insn >> 4) & 0x3;
2359bf80
MR
8295 switch (op2) {
8296 case 0:
426f5abc 8297 goto illegal_op;
2359bf80
MR
8298 case 1:
8299 /* Load/store exclusive byte/halfword/doubleword */
8300 if (op == 2) {
8301 goto illegal_op;
8302 }
8303 ARCH(7);
8304 break;
8305 case 2:
8306 /* Load-acquire/store-release */
8307 if (op == 3) {
8308 goto illegal_op;
8309 }
8310 /* Fall through */
8311 case 3:
8312 /* Load-acquire/store-release exclusive */
8313 ARCH(8);
8314 break;
426f5abc 8315 }
39d5492a 8316 addr = tcg_temp_local_new_i32();
98a46317 8317 load_reg_var(s, addr, rn);
2359bf80
MR
8318 if (!(op2 & 1)) {
8319 if (insn & (1 << 20)) {
8320 tmp = tcg_temp_new_i32();
8321 switch (op) {
8322 case 0: /* ldab */
08307563 8323 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8324 break;
8325 case 1: /* ldah */
08307563 8326 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8327 break;
8328 case 2: /* lda */
08307563 8329 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8330 break;
8331 default:
8332 abort();
8333 }
8334 store_reg(s, rs, tmp);
8335 } else {
8336 tmp = load_reg(s, rs);
8337 switch (op) {
8338 case 0: /* stlb */
08307563 8339 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8340 break;
8341 case 1: /* stlh */
08307563 8342 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8343 break;
8344 case 2: /* stl */
08307563 8345 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8346 break;
8347 default:
8348 abort();
8349 }
8350 tcg_temp_free_i32(tmp);
8351 }
8352 } else if (insn & (1 << 20)) {
426f5abc 8353 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8354 } else {
426f5abc 8355 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8356 }
39d5492a 8357 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8358 }
8359 } else {
8360 /* Load/store multiple, RFE, SRS. */
8361 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8362 /* RFE, SRS: not available in user mode or on M profile */
8363 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8364 goto illegal_op;
00115976 8365 }
9ee6e8bb
PB
8366 if (insn & (1 << 20)) {
8367 /* rfe */
b0109805
PB
8368 addr = load_reg(s, rn);
8369 if ((insn & (1 << 24)) == 0)
8370 tcg_gen_addi_i32(addr, addr, -8);
8371 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 8372 tmp = tcg_temp_new_i32();
08307563 8373 gen_aa32_ld32u(tmp, addr, 0);
b0109805 8374 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8375 tmp2 = tcg_temp_new_i32();
08307563 8376 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8377 if (insn & (1 << 21)) {
8378 /* Base writeback. */
b0109805
PB
8379 if (insn & (1 << 24)) {
8380 tcg_gen_addi_i32(addr, addr, 4);
8381 } else {
8382 tcg_gen_addi_i32(addr, addr, -4);
8383 }
8384 store_reg(s, rn, addr);
8385 } else {
7d1b0095 8386 tcg_temp_free_i32(addr);
9ee6e8bb 8387 }
b0109805 8388 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8389 } else {
8390 /* srs */
81465888
PM
8391 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8392 insn & (1 << 21));
9ee6e8bb
PB
8393 }
8394 } else {
5856d44e 8395 int i, loaded_base = 0;
39d5492a 8396 TCGv_i32 loaded_var;
9ee6e8bb 8397 /* Load/store multiple. */
b0109805 8398 addr = load_reg(s, rn);
9ee6e8bb
PB
8399 offset = 0;
8400 for (i = 0; i < 16; i++) {
8401 if (insn & (1 << i))
8402 offset += 4;
8403 }
8404 if (insn & (1 << 24)) {
b0109805 8405 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8406 }
8407
39d5492a 8408 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8409 for (i = 0; i < 16; i++) {
8410 if ((insn & (1 << i)) == 0)
8411 continue;
8412 if (insn & (1 << 20)) {
8413 /* Load. */
e2592fad 8414 tmp = tcg_temp_new_i32();
08307563 8415 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 8416 if (i == 15) {
b0109805 8417 gen_bx(s, tmp);
5856d44e
YO
8418 } else if (i == rn) {
8419 loaded_var = tmp;
8420 loaded_base = 1;
9ee6e8bb 8421 } else {
b0109805 8422 store_reg(s, i, tmp);
9ee6e8bb
PB
8423 }
8424 } else {
8425 /* Store. */
b0109805 8426 tmp = load_reg(s, i);
08307563 8427 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8428 tcg_temp_free_i32(tmp);
9ee6e8bb 8429 }
b0109805 8430 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8431 }
5856d44e
YO
8432 if (loaded_base) {
8433 store_reg(s, rn, loaded_var);
8434 }
9ee6e8bb
PB
8435 if (insn & (1 << 21)) {
8436 /* Base register writeback. */
8437 if (insn & (1 << 24)) {
b0109805 8438 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8439 }
8440 /* Fault if writeback register is in register list. */
8441 if (insn & (1 << rn))
8442 goto illegal_op;
b0109805
PB
8443 store_reg(s, rn, addr);
8444 } else {
7d1b0095 8445 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8446 }
8447 }
8448 }
8449 break;
2af9ab77
JB
8450 case 5:
8451
9ee6e8bb 8452 op = (insn >> 21) & 0xf;
2af9ab77
JB
8453 if (op == 6) {
8454 /* Halfword pack. */
8455 tmp = load_reg(s, rn);
8456 tmp2 = load_reg(s, rm);
8457 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8458 if (insn & (1 << 5)) {
8459 /* pkhtb */
8460 if (shift == 0)
8461 shift = 31;
8462 tcg_gen_sari_i32(tmp2, tmp2, shift);
8463 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8464 tcg_gen_ext16u_i32(tmp2, tmp2);
8465 } else {
8466 /* pkhbt */
8467 if (shift)
8468 tcg_gen_shli_i32(tmp2, tmp2, shift);
8469 tcg_gen_ext16u_i32(tmp, tmp);
8470 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8471 }
8472 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8473 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8474 store_reg(s, rd, tmp);
8475 } else {
2af9ab77
JB
8476 /* Data processing register constant shift. */
8477 if (rn == 15) {
7d1b0095 8478 tmp = tcg_temp_new_i32();
2af9ab77
JB
8479 tcg_gen_movi_i32(tmp, 0);
8480 } else {
8481 tmp = load_reg(s, rn);
8482 }
8483 tmp2 = load_reg(s, rm);
8484
8485 shiftop = (insn >> 4) & 3;
8486 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8487 conds = (insn & (1 << 20)) != 0;
8488 logic_cc = (conds && thumb2_logic_op(op));
8489 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8490 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8491 goto illegal_op;
7d1b0095 8492 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8493 if (rd != 15) {
8494 store_reg(s, rd, tmp);
8495 } else {
7d1b0095 8496 tcg_temp_free_i32(tmp);
2af9ab77 8497 }
3174f8e9 8498 }
9ee6e8bb
PB
8499 break;
8500 case 13: /* Misc data processing. */
8501 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8502 if (op < 4 && (insn & 0xf000) != 0xf000)
8503 goto illegal_op;
8504 switch (op) {
8505 case 0: /* Register controlled shift. */
8984bd2e
PB
8506 tmp = load_reg(s, rn);
8507 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8508 if ((insn & 0x70) != 0)
8509 goto illegal_op;
8510 op = (insn >> 21) & 3;
8984bd2e
PB
8511 logic_cc = (insn & (1 << 20)) != 0;
8512 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8513 if (logic_cc)
8514 gen_logic_CC(tmp);
21aeb343 8515 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8516 break;
8517 case 1: /* Sign/zero extend. */
5e3f878a 8518 tmp = load_reg(s, rm);
9ee6e8bb 8519 shift = (insn >> 4) & 3;
1301f322 8520 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8521 rotate, a shift is sufficient. */
8522 if (shift != 0)
f669df27 8523 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8524 op = (insn >> 20) & 7;
8525 switch (op) {
5e3f878a
PB
8526 case 0: gen_sxth(tmp); break;
8527 case 1: gen_uxth(tmp); break;
8528 case 2: gen_sxtb16(tmp); break;
8529 case 3: gen_uxtb16(tmp); break;
8530 case 4: gen_sxtb(tmp); break;
8531 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8532 default: goto illegal_op;
8533 }
8534 if (rn != 15) {
5e3f878a 8535 tmp2 = load_reg(s, rn);
9ee6e8bb 8536 if ((op >> 1) == 1) {
5e3f878a 8537 gen_add16(tmp, tmp2);
9ee6e8bb 8538 } else {
5e3f878a 8539 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8540 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8541 }
8542 }
5e3f878a 8543 store_reg(s, rd, tmp);
9ee6e8bb
PB
8544 break;
8545 case 2: /* SIMD add/subtract. */
8546 op = (insn >> 20) & 7;
8547 shift = (insn >> 4) & 7;
8548 if ((op & 3) == 3 || (shift & 3) == 3)
8549 goto illegal_op;
6ddbc6e4
PB
8550 tmp = load_reg(s, rn);
8551 tmp2 = load_reg(s, rm);
8552 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8553 tcg_temp_free_i32(tmp2);
6ddbc6e4 8554 store_reg(s, rd, tmp);
9ee6e8bb
PB
8555 break;
8556 case 3: /* Other data processing. */
8557 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8558 if (op < 4) {
8559 /* Saturating add/subtract. */
d9ba4830
PB
8560 tmp = load_reg(s, rn);
8561 tmp2 = load_reg(s, rm);
9ee6e8bb 8562 if (op & 1)
9ef39277 8563 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8564 if (op & 2)
9ef39277 8565 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8566 else
9ef39277 8567 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8568 tcg_temp_free_i32(tmp2);
9ee6e8bb 8569 } else {
d9ba4830 8570 tmp = load_reg(s, rn);
9ee6e8bb
PB
8571 switch (op) {
8572 case 0x0a: /* rbit */
d9ba4830 8573 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8574 break;
8575 case 0x08: /* rev */
66896cb8 8576 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8577 break;
8578 case 0x09: /* rev16 */
d9ba4830 8579 gen_rev16(tmp);
9ee6e8bb
PB
8580 break;
8581 case 0x0b: /* revsh */
d9ba4830 8582 gen_revsh(tmp);
9ee6e8bb
PB
8583 break;
8584 case 0x10: /* sel */
d9ba4830 8585 tmp2 = load_reg(s, rm);
7d1b0095 8586 tmp3 = tcg_temp_new_i32();
0ecb72a5 8587 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8588 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8589 tcg_temp_free_i32(tmp3);
8590 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8591 break;
8592 case 0x18: /* clz */
d9ba4830 8593 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8594 break;
8595 default:
8596 goto illegal_op;
8597 }
8598 }
d9ba4830 8599 store_reg(s, rd, tmp);
9ee6e8bb
PB
8600 break;
8601 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8602 op = (insn >> 4) & 0xf;
d9ba4830
PB
8603 tmp = load_reg(s, rn);
8604 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8605 switch ((insn >> 20) & 7) {
8606 case 0: /* 32 x 32 -> 32 */
d9ba4830 8607 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8608 tcg_temp_free_i32(tmp2);
9ee6e8bb 8609 if (rs != 15) {
d9ba4830 8610 tmp2 = load_reg(s, rs);
9ee6e8bb 8611 if (op)
d9ba4830 8612 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8613 else
d9ba4830 8614 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8615 tcg_temp_free_i32(tmp2);
9ee6e8bb 8616 }
9ee6e8bb
PB
8617 break;
8618 case 1: /* 16 x 16 -> 32 */
d9ba4830 8619 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8620 tcg_temp_free_i32(tmp2);
9ee6e8bb 8621 if (rs != 15) {
d9ba4830 8622 tmp2 = load_reg(s, rs);
9ef39277 8623 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8624 tcg_temp_free_i32(tmp2);
9ee6e8bb 8625 }
9ee6e8bb
PB
8626 break;
8627 case 2: /* Dual multiply add. */
8628 case 4: /* Dual multiply subtract. */
8629 if (op)
d9ba4830
PB
8630 gen_swap_half(tmp2);
8631 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8632 if (insn & (1 << 22)) {
e1d177b9 8633 /* This subtraction cannot overflow. */
d9ba4830 8634 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8635 } else {
e1d177b9
PM
8636 /* This addition cannot overflow 32 bits;
8637 * however it may overflow considered as a signed
8638 * operation, in which case we must set the Q flag.
8639 */
9ef39277 8640 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8641 }
7d1b0095 8642 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8643 if (rs != 15)
8644 {
d9ba4830 8645 tmp2 = load_reg(s, rs);
9ef39277 8646 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8647 tcg_temp_free_i32(tmp2);
9ee6e8bb 8648 }
9ee6e8bb
PB
8649 break;
8650 case 3: /* 32 * 16 -> 32msb */
8651 if (op)
d9ba4830 8652 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8653 else
d9ba4830 8654 gen_sxth(tmp2);
a7812ae4
PB
8655 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8656 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8657 tmp = tcg_temp_new_i32();
a7812ae4 8658 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8659 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8660 if (rs != 15)
8661 {
d9ba4830 8662 tmp2 = load_reg(s, rs);
9ef39277 8663 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8664 tcg_temp_free_i32(tmp2);
9ee6e8bb 8665 }
9ee6e8bb 8666 break;
838fa72d
AJ
8667 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8668 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8669 if (rs != 15) {
838fa72d
AJ
8670 tmp = load_reg(s, rs);
8671 if (insn & (1 << 20)) {
8672 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8673 } else {
838fa72d 8674 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8675 }
2c0262af 8676 }
838fa72d
AJ
8677 if (insn & (1 << 4)) {
8678 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8679 }
8680 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8681 tmp = tcg_temp_new_i32();
838fa72d
AJ
8682 tcg_gen_trunc_i64_i32(tmp, tmp64);
8683 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8684 break;
8685 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8686 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8687 tcg_temp_free_i32(tmp2);
9ee6e8bb 8688 if (rs != 15) {
d9ba4830
PB
8689 tmp2 = load_reg(s, rs);
8690 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8691 tcg_temp_free_i32(tmp2);
5fd46862 8692 }
9ee6e8bb 8693 break;
2c0262af 8694 }
d9ba4830 8695 store_reg(s, rd, tmp);
2c0262af 8696 break;
9ee6e8bb
PB
8697 case 6: case 7: /* 64-bit multiply, Divide. */
8698 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8699 tmp = load_reg(s, rn);
8700 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8701 if ((op & 0x50) == 0x10) {
8702 /* sdiv, udiv */
47789990 8703 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8704 goto illegal_op;
47789990 8705 }
9ee6e8bb 8706 if (op & 0x20)
5e3f878a 8707 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8708 else
5e3f878a 8709 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8710 tcg_temp_free_i32(tmp2);
5e3f878a 8711 store_reg(s, rd, tmp);
9ee6e8bb
PB
8712 } else if ((op & 0xe) == 0xc) {
8713 /* Dual multiply accumulate long. */
8714 if (op & 1)
5e3f878a
PB
8715 gen_swap_half(tmp2);
8716 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8717 if (op & 0x10) {
5e3f878a 8718 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8719 } else {
5e3f878a 8720 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8721 }
7d1b0095 8722 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8723 /* BUGFIX */
8724 tmp64 = tcg_temp_new_i64();
8725 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8726 tcg_temp_free_i32(tmp);
a7812ae4
PB
8727 gen_addq(s, tmp64, rs, rd);
8728 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8729 tcg_temp_free_i64(tmp64);
2c0262af 8730 } else {
9ee6e8bb
PB
8731 if (op & 0x20) {
8732 /* Unsigned 64-bit multiply */
a7812ae4 8733 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8734 } else {
9ee6e8bb
PB
8735 if (op & 8) {
8736 /* smlalxy */
5e3f878a 8737 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8738 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8739 tmp64 = tcg_temp_new_i64();
8740 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8741 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8742 } else {
8743 /* Signed 64-bit multiply */
a7812ae4 8744 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8745 }
b5ff1b31 8746 }
9ee6e8bb
PB
8747 if (op & 4) {
8748 /* umaal */
a7812ae4
PB
8749 gen_addq_lo(s, tmp64, rs);
8750 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8751 } else if (op & 0x40) {
8752 /* 64-bit accumulate. */
a7812ae4 8753 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8754 }
a7812ae4 8755 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8756 tcg_temp_free_i64(tmp64);
5fd46862 8757 }
2c0262af 8758 break;
9ee6e8bb
PB
8759 }
8760 break;
8761 case 6: case 7: case 14: case 15:
8762 /* Coprocessor. */
8763 if (((insn >> 24) & 3) == 3) {
8764 /* Translate into the equivalent ARM encoding. */
f06053e3 8765 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8766 if (disas_neon_data_insn(env, s, insn))
8767 goto illegal_op;
8768 } else {
8769 if (insn & (1 << 28))
8770 goto illegal_op;
8771 if (disas_coproc_insn (env, s, insn))
8772 goto illegal_op;
8773 }
8774 break;
8775 case 8: case 9: case 10: case 11:
8776 if (insn & (1 << 15)) {
8777 /* Branches, misc control. */
8778 if (insn & 0x5000) {
8779 /* Unconditional branch. */
8780 /* signextend(hw1[10:0]) -> offset[:12]. */
8781 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8782 /* hw1[10:0] -> offset[11:1]. */
8783 offset |= (insn & 0x7ff) << 1;
8784 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8785 offset[24:22] already have the same value because of the
8786 sign extension above. */
8787 offset ^= ((~insn) & (1 << 13)) << 10;
8788 offset ^= ((~insn) & (1 << 11)) << 11;
8789
9ee6e8bb
PB
8790 if (insn & (1 << 14)) {
8791 /* Branch and link. */
3174f8e9 8792 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8793 }
3b46e624 8794
b0109805 8795 offset += s->pc;
9ee6e8bb
PB
8796 if (insn & (1 << 12)) {
8797 /* b/bl */
b0109805 8798 gen_jmp(s, offset);
9ee6e8bb
PB
8799 } else {
8800 /* blx */
b0109805 8801 offset &= ~(uint32_t)2;
be5e7a76 8802 /* thumb2 bx, no need to check */
b0109805 8803 gen_bx_im(s, offset);
2c0262af 8804 }
9ee6e8bb
PB
8805 } else if (((insn >> 23) & 7) == 7) {
8806 /* Misc control */
8807 if (insn & (1 << 13))
8808 goto illegal_op;
8809
8810 if (insn & (1 << 26)) {
8811 /* Secure monitor call (v6Z) */
e0c270d9
SW
8812 qemu_log_mask(LOG_UNIMP,
8813 "arm: unimplemented secure monitor call\n");
9ee6e8bb 8814 goto illegal_op; /* not implemented. */
2c0262af 8815 } else {
9ee6e8bb
PB
8816 op = (insn >> 20) & 7;
8817 switch (op) {
8818 case 0: /* msr cpsr. */
8819 if (IS_M(env)) {
8984bd2e
PB
8820 tmp = load_reg(s, rn);
8821 addr = tcg_const_i32(insn & 0xff);
8822 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8823 tcg_temp_free_i32(addr);
7d1b0095 8824 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8825 gen_lookup_tb(s);
8826 break;
8827 }
8828 /* fall through */
8829 case 1: /* msr spsr. */
8830 if (IS_M(env))
8831 goto illegal_op;
2fbac54b
FN
8832 tmp = load_reg(s, rn);
8833 if (gen_set_psr(s,
9ee6e8bb 8834 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8835 op == 1, tmp))
9ee6e8bb
PB
8836 goto illegal_op;
8837 break;
8838 case 2: /* cps, nop-hint. */
8839 if (((insn >> 8) & 7) == 0) {
8840 gen_nop_hint(s, insn & 0xff);
8841 }
8842 /* Implemented as NOP in user mode. */
8843 if (IS_USER(s))
8844 break;
8845 offset = 0;
8846 imm = 0;
8847 if (insn & (1 << 10)) {
8848 if (insn & (1 << 7))
8849 offset |= CPSR_A;
8850 if (insn & (1 << 6))
8851 offset |= CPSR_I;
8852 if (insn & (1 << 5))
8853 offset |= CPSR_F;
8854 if (insn & (1 << 9))
8855 imm = CPSR_A | CPSR_I | CPSR_F;
8856 }
8857 if (insn & (1 << 8)) {
8858 offset |= 0x1f;
8859 imm |= (insn & 0x1f);
8860 }
8861 if (offset) {
2fbac54b 8862 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8863 }
8864 break;
8865 case 3: /* Special control operations. */
426f5abc 8866 ARCH(7);
9ee6e8bb
PB
8867 op = (insn >> 4) & 0xf;
8868 switch (op) {
8869 case 2: /* clrex */
426f5abc 8870 gen_clrex(s);
9ee6e8bb
PB
8871 break;
8872 case 4: /* dsb */
8873 case 5: /* dmb */
8874 case 6: /* isb */
8875 /* These execute as NOPs. */
9ee6e8bb
PB
8876 break;
8877 default:
8878 goto illegal_op;
8879 }
8880 break;
8881 case 4: /* bxj */
8882 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8883 tmp = load_reg(s, rn);
8884 gen_bx(s, tmp);
9ee6e8bb
PB
8885 break;
8886 case 5: /* Exception return. */
b8b45b68
RV
8887 if (IS_USER(s)) {
8888 goto illegal_op;
8889 }
8890 if (rn != 14 || rd != 15) {
8891 goto illegal_op;
8892 }
8893 tmp = load_reg(s, rn);
8894 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8895 gen_exception_return(s, tmp);
8896 break;
9ee6e8bb 8897 case 6: /* mrs cpsr. */
7d1b0095 8898 tmp = tcg_temp_new_i32();
9ee6e8bb 8899 if (IS_M(env)) {
8984bd2e
PB
8900 addr = tcg_const_i32(insn & 0xff);
8901 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8902 tcg_temp_free_i32(addr);
9ee6e8bb 8903 } else {
9ef39277 8904 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8905 }
8984bd2e 8906 store_reg(s, rd, tmp);
9ee6e8bb
PB
8907 break;
8908 case 7: /* mrs spsr. */
8909 /* Not accessible in user mode. */
8910 if (IS_USER(s) || IS_M(env))
8911 goto illegal_op;
d9ba4830
PB
8912 tmp = load_cpu_field(spsr);
8913 store_reg(s, rd, tmp);
9ee6e8bb 8914 break;
2c0262af
FB
8915 }
8916 }
9ee6e8bb
PB
8917 } else {
8918 /* Conditional branch. */
8919 op = (insn >> 22) & 0xf;
8920 /* Generate a conditional jump to next instruction. */
8921 s->condlabel = gen_new_label();
d9ba4830 8922 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8923 s->condjmp = 1;
8924
8925 /* offset[11:1] = insn[10:0] */
8926 offset = (insn & 0x7ff) << 1;
8927 /* offset[17:12] = insn[21:16]. */
8928 offset |= (insn & 0x003f0000) >> 4;
8929 /* offset[31:20] = insn[26]. */
8930 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8931 /* offset[18] = insn[13]. */
8932 offset |= (insn & (1 << 13)) << 5;
8933 /* offset[19] = insn[11]. */
8934 offset |= (insn & (1 << 11)) << 8;
8935
8936 /* jump to the offset */
b0109805 8937 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8938 }
8939 } else {
8940 /* Data processing immediate. */
8941 if (insn & (1 << 25)) {
8942 if (insn & (1 << 24)) {
8943 if (insn & (1 << 20))
8944 goto illegal_op;
8945 /* Bitfield/Saturate. */
8946 op = (insn >> 21) & 7;
8947 imm = insn & 0x1f;
8948 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8949 if (rn == 15) {
7d1b0095 8950 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8951 tcg_gen_movi_i32(tmp, 0);
8952 } else {
8953 tmp = load_reg(s, rn);
8954 }
9ee6e8bb
PB
8955 switch (op) {
8956 case 2: /* Signed bitfield extract. */
8957 imm++;
8958 if (shift + imm > 32)
8959 goto illegal_op;
8960 if (imm < 32)
6ddbc6e4 8961 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8962 break;
8963 case 6: /* Unsigned bitfield extract. */
8964 imm++;
8965 if (shift + imm > 32)
8966 goto illegal_op;
8967 if (imm < 32)
6ddbc6e4 8968 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8969 break;
8970 case 3: /* Bitfield insert/clear. */
8971 if (imm < shift)
8972 goto illegal_op;
8973 imm = imm + 1 - shift;
8974 if (imm != 32) {
6ddbc6e4 8975 tmp2 = load_reg(s, rd);
d593c48e 8976 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8977 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8978 }
8979 break;
8980 case 7:
8981 goto illegal_op;
8982 default: /* Saturate. */
9ee6e8bb
PB
8983 if (shift) {
8984 if (op & 1)
6ddbc6e4 8985 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8986 else
6ddbc6e4 8987 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8988 }
6ddbc6e4 8989 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8990 if (op & 4) {
8991 /* Unsigned. */
9ee6e8bb 8992 if ((op & 1) && shift == 0)
9ef39277 8993 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8994 else
9ef39277 8995 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8996 } else {
9ee6e8bb 8997 /* Signed. */
9ee6e8bb 8998 if ((op & 1) && shift == 0)
9ef39277 8999 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9000 else
9ef39277 9001 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 9002 }
b75263d6 9003 tcg_temp_free_i32(tmp2);
9ee6e8bb 9004 break;
2c0262af 9005 }
6ddbc6e4 9006 store_reg(s, rd, tmp);
9ee6e8bb
PB
9007 } else {
9008 imm = ((insn & 0x04000000) >> 15)
9009 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9010 if (insn & (1 << 22)) {
9011 /* 16-bit immediate. */
9012 imm |= (insn >> 4) & 0xf000;
9013 if (insn & (1 << 23)) {
9014 /* movt */
5e3f878a 9015 tmp = load_reg(s, rd);
86831435 9016 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9017 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9018 } else {
9ee6e8bb 9019 /* movw */
7d1b0095 9020 tmp = tcg_temp_new_i32();
5e3f878a 9021 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9022 }
9023 } else {
9ee6e8bb
PB
9024 /* Add/sub 12-bit immediate. */
9025 if (rn == 15) {
b0109805 9026 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9027 if (insn & (1 << 23))
b0109805 9028 offset -= imm;
9ee6e8bb 9029 else
b0109805 9030 offset += imm;
7d1b0095 9031 tmp = tcg_temp_new_i32();
5e3f878a 9032 tcg_gen_movi_i32(tmp, offset);
2c0262af 9033 } else {
5e3f878a 9034 tmp = load_reg(s, rn);
9ee6e8bb 9035 if (insn & (1 << 23))
5e3f878a 9036 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9037 else
5e3f878a 9038 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9039 }
9ee6e8bb 9040 }
5e3f878a 9041 store_reg(s, rd, tmp);
191abaa2 9042 }
9ee6e8bb
PB
9043 } else {
9044 int shifter_out = 0;
9045 /* modified 12-bit immediate. */
9046 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9047 imm = (insn & 0xff);
9048 switch (shift) {
9049 case 0: /* XY */
9050 /* Nothing to do. */
9051 break;
9052 case 1: /* 00XY00XY */
9053 imm |= imm << 16;
9054 break;
9055 case 2: /* XY00XY00 */
9056 imm |= imm << 16;
9057 imm <<= 8;
9058 break;
9059 case 3: /* XYXYXYXY */
9060 imm |= imm << 16;
9061 imm |= imm << 8;
9062 break;
9063 default: /* Rotated constant. */
9064 shift = (shift << 1) | (imm >> 7);
9065 imm |= 0x80;
9066 imm = imm << (32 - shift);
9067 shifter_out = 1;
9068 break;
b5ff1b31 9069 }
7d1b0095 9070 tmp2 = tcg_temp_new_i32();
3174f8e9 9071 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9072 rn = (insn >> 16) & 0xf;
3174f8e9 9073 if (rn == 15) {
7d1b0095 9074 tmp = tcg_temp_new_i32();
3174f8e9
FN
9075 tcg_gen_movi_i32(tmp, 0);
9076 } else {
9077 tmp = load_reg(s, rn);
9078 }
9ee6e8bb
PB
9079 op = (insn >> 21) & 0xf;
9080 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9081 shifter_out, tmp, tmp2))
9ee6e8bb 9082 goto illegal_op;
7d1b0095 9083 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9084 rd = (insn >> 8) & 0xf;
9085 if (rd != 15) {
3174f8e9
FN
9086 store_reg(s, rd, tmp);
9087 } else {
7d1b0095 9088 tcg_temp_free_i32(tmp);
2c0262af 9089 }
2c0262af 9090 }
9ee6e8bb
PB
9091 }
9092 break;
9093 case 12: /* Load/store single data item. */
9094 {
9095 int postinc = 0;
9096 int writeback = 0;
b0109805 9097 int user;
9ee6e8bb
PB
9098 if ((insn & 0x01100000) == 0x01000000) {
9099 if (disas_neon_ls_insn(env, s, insn))
c1713132 9100 goto illegal_op;
9ee6e8bb
PB
9101 break;
9102 }
a2fdc890
PM
9103 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9104 if (rs == 15) {
9105 if (!(insn & (1 << 20))) {
9106 goto illegal_op;
9107 }
9108 if (op != 2) {
9109 /* Byte or halfword load space with dest == r15 : memory hints.
9110 * Catch them early so we don't emit pointless addressing code.
9111 * This space is a mix of:
9112 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9113 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9114 * cores)
9115 * unallocated hints, which must be treated as NOPs
9116 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9117 * which is easiest for the decoding logic
9118 * Some space which must UNDEF
9119 */
9120 int op1 = (insn >> 23) & 3;
9121 int op2 = (insn >> 6) & 0x3f;
9122 if (op & 2) {
9123 goto illegal_op;
9124 }
9125 if (rn == 15) {
02afbf64
PM
9126 /* UNPREDICTABLE, unallocated hint or
9127 * PLD/PLDW/PLI (literal)
9128 */
a2fdc890
PM
9129 return 0;
9130 }
9131 if (op1 & 1) {
02afbf64 9132 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9133 }
9134 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9135 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9136 }
9137 /* UNDEF space, or an UNPREDICTABLE */
9138 return 1;
9139 }
9140 }
b0109805 9141 user = IS_USER(s);
9ee6e8bb 9142 if (rn == 15) {
7d1b0095 9143 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9144 /* PC relative. */
9145 /* s->pc has already been incremented by 4. */
9146 imm = s->pc & 0xfffffffc;
9147 if (insn & (1 << 23))
9148 imm += insn & 0xfff;
9149 else
9150 imm -= insn & 0xfff;
b0109805 9151 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9152 } else {
b0109805 9153 addr = load_reg(s, rn);
9ee6e8bb
PB
9154 if (insn & (1 << 23)) {
9155 /* Positive offset. */
9156 imm = insn & 0xfff;
b0109805 9157 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9158 } else {
9ee6e8bb 9159 imm = insn & 0xff;
2a0308c5
PM
9160 switch ((insn >> 8) & 0xf) {
9161 case 0x0: /* Shifted Register. */
9ee6e8bb 9162 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9163 if (shift > 3) {
9164 tcg_temp_free_i32(addr);
18c9b560 9165 goto illegal_op;
2a0308c5 9166 }
b26eefb6 9167 tmp = load_reg(s, rm);
9ee6e8bb 9168 if (shift)
b26eefb6 9169 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9170 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9171 tcg_temp_free_i32(tmp);
9ee6e8bb 9172 break;
2a0308c5 9173 case 0xc: /* Negative offset. */
b0109805 9174 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9175 break;
2a0308c5 9176 case 0xe: /* User privilege. */
b0109805
PB
9177 tcg_gen_addi_i32(addr, addr, imm);
9178 user = 1;
9ee6e8bb 9179 break;
2a0308c5 9180 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9181 imm = -imm;
9182 /* Fall through. */
2a0308c5 9183 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9184 postinc = 1;
9185 writeback = 1;
9186 break;
2a0308c5 9187 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9188 imm = -imm;
9189 /* Fall through. */
2a0308c5 9190 case 0xf: /* Pre-increment. */
b0109805 9191 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9192 writeback = 1;
9193 break;
9194 default:
2a0308c5 9195 tcg_temp_free_i32(addr);
b7bcbe95 9196 goto illegal_op;
9ee6e8bb
PB
9197 }
9198 }
9199 }
9ee6e8bb
PB
9200 if (insn & (1 << 20)) {
9201 /* Load. */
5a839c0d 9202 tmp = tcg_temp_new_i32();
a2fdc890 9203 switch (op) {
5a839c0d 9204 case 0:
08307563 9205 gen_aa32_ld8u(tmp, addr, user);
5a839c0d
PM
9206 break;
9207 case 4:
08307563 9208 gen_aa32_ld8s(tmp, addr, user);
5a839c0d
PM
9209 break;
9210 case 1:
08307563 9211 gen_aa32_ld16u(tmp, addr, user);
5a839c0d
PM
9212 break;
9213 case 5:
08307563 9214 gen_aa32_ld16s(tmp, addr, user);
5a839c0d
PM
9215 break;
9216 case 2:
08307563 9217 gen_aa32_ld32u(tmp, addr, user);
5a839c0d 9218 break;
2a0308c5 9219 default:
5a839c0d 9220 tcg_temp_free_i32(tmp);
2a0308c5
PM
9221 tcg_temp_free_i32(addr);
9222 goto illegal_op;
a2fdc890
PM
9223 }
9224 if (rs == 15) {
9225 gen_bx(s, tmp);
9ee6e8bb 9226 } else {
a2fdc890 9227 store_reg(s, rs, tmp);
9ee6e8bb
PB
9228 }
9229 } else {
9230 /* Store. */
b0109805 9231 tmp = load_reg(s, rs);
9ee6e8bb 9232 switch (op) {
5a839c0d 9233 case 0:
08307563 9234 gen_aa32_st8(tmp, addr, user);
5a839c0d
PM
9235 break;
9236 case 1:
08307563 9237 gen_aa32_st16(tmp, addr, user);
5a839c0d
PM
9238 break;
9239 case 2:
08307563 9240 gen_aa32_st32(tmp, addr, user);
5a839c0d 9241 break;
2a0308c5 9242 default:
5a839c0d 9243 tcg_temp_free_i32(tmp);
2a0308c5
PM
9244 tcg_temp_free_i32(addr);
9245 goto illegal_op;
b7bcbe95 9246 }
5a839c0d 9247 tcg_temp_free_i32(tmp);
2c0262af 9248 }
9ee6e8bb 9249 if (postinc)
b0109805
PB
9250 tcg_gen_addi_i32(addr, addr, imm);
9251 if (writeback) {
9252 store_reg(s, rn, addr);
9253 } else {
7d1b0095 9254 tcg_temp_free_i32(addr);
b0109805 9255 }
9ee6e8bb
PB
9256 }
9257 break;
9258 default:
9259 goto illegal_op;
2c0262af 9260 }
9ee6e8bb
PB
9261 return 0;
9262illegal_op:
9263 return 1;
2c0262af
FB
9264}
9265
0ecb72a5 9266static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9267{
9268 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9269 int32_t offset;
9270 int i;
39d5492a
PM
9271 TCGv_i32 tmp;
9272 TCGv_i32 tmp2;
9273 TCGv_i32 addr;
99c475ab 9274
9ee6e8bb
PB
9275 if (s->condexec_mask) {
9276 cond = s->condexec_cond;
bedd2912
JB
9277 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9278 s->condlabel = gen_new_label();
9279 gen_test_cc(cond ^ 1, s->condlabel);
9280 s->condjmp = 1;
9281 }
9ee6e8bb
PB
9282 }
9283
d31dd73e 9284 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9285 s->pc += 2;
b5ff1b31 9286
99c475ab
FB
9287 switch (insn >> 12) {
9288 case 0: case 1:
396e467c 9289
99c475ab
FB
9290 rd = insn & 7;
9291 op = (insn >> 11) & 3;
9292 if (op == 3) {
9293 /* add/subtract */
9294 rn = (insn >> 3) & 7;
396e467c 9295 tmp = load_reg(s, rn);
99c475ab
FB
9296 if (insn & (1 << 10)) {
9297 /* immediate */
7d1b0095 9298 tmp2 = tcg_temp_new_i32();
396e467c 9299 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9300 } else {
9301 /* reg */
9302 rm = (insn >> 6) & 7;
396e467c 9303 tmp2 = load_reg(s, rm);
99c475ab 9304 }
9ee6e8bb
PB
9305 if (insn & (1 << 9)) {
9306 if (s->condexec_mask)
396e467c 9307 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9308 else
72485ec4 9309 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9310 } else {
9311 if (s->condexec_mask)
396e467c 9312 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9313 else
72485ec4 9314 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9315 }
7d1b0095 9316 tcg_temp_free_i32(tmp2);
396e467c 9317 store_reg(s, rd, tmp);
99c475ab
FB
9318 } else {
9319 /* shift immediate */
9320 rm = (insn >> 3) & 7;
9321 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9322 tmp = load_reg(s, rm);
9323 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9324 if (!s->condexec_mask)
9325 gen_logic_CC(tmp);
9326 store_reg(s, rd, tmp);
99c475ab
FB
9327 }
9328 break;
9329 case 2: case 3:
9330 /* arithmetic large immediate */
9331 op = (insn >> 11) & 3;
9332 rd = (insn >> 8) & 0x7;
396e467c 9333 if (op == 0) { /* mov */
7d1b0095 9334 tmp = tcg_temp_new_i32();
396e467c 9335 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9336 if (!s->condexec_mask)
396e467c
FN
9337 gen_logic_CC(tmp);
9338 store_reg(s, rd, tmp);
9339 } else {
9340 tmp = load_reg(s, rd);
7d1b0095 9341 tmp2 = tcg_temp_new_i32();
396e467c
FN
9342 tcg_gen_movi_i32(tmp2, insn & 0xff);
9343 switch (op) {
9344 case 1: /* cmp */
72485ec4 9345 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9346 tcg_temp_free_i32(tmp);
9347 tcg_temp_free_i32(tmp2);
396e467c
FN
9348 break;
9349 case 2: /* add */
9350 if (s->condexec_mask)
9351 tcg_gen_add_i32(tmp, tmp, tmp2);
9352 else
72485ec4 9353 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9354 tcg_temp_free_i32(tmp2);
396e467c
FN
9355 store_reg(s, rd, tmp);
9356 break;
9357 case 3: /* sub */
9358 if (s->condexec_mask)
9359 tcg_gen_sub_i32(tmp, tmp, tmp2);
9360 else
72485ec4 9361 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9362 tcg_temp_free_i32(tmp2);
396e467c
FN
9363 store_reg(s, rd, tmp);
9364 break;
9365 }
99c475ab 9366 }
99c475ab
FB
9367 break;
9368 case 4:
9369 if (insn & (1 << 11)) {
9370 rd = (insn >> 8) & 7;
5899f386
FB
9371 /* load pc-relative. Bit 1 of PC is ignored. */
9372 val = s->pc + 2 + ((insn & 0xff) * 4);
9373 val &= ~(uint32_t)2;
7d1b0095 9374 addr = tcg_temp_new_i32();
b0109805 9375 tcg_gen_movi_i32(addr, val);
c40c8556 9376 tmp = tcg_temp_new_i32();
08307563 9377 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9378 tcg_temp_free_i32(addr);
b0109805 9379 store_reg(s, rd, tmp);
99c475ab
FB
9380 break;
9381 }
9382 if (insn & (1 << 10)) {
9383 /* data processing extended or blx */
9384 rd = (insn & 7) | ((insn >> 4) & 8);
9385 rm = (insn >> 3) & 0xf;
9386 op = (insn >> 8) & 3;
9387 switch (op) {
9388 case 0: /* add */
396e467c
FN
9389 tmp = load_reg(s, rd);
9390 tmp2 = load_reg(s, rm);
9391 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9392 tcg_temp_free_i32(tmp2);
396e467c 9393 store_reg(s, rd, tmp);
99c475ab
FB
9394 break;
9395 case 1: /* cmp */
396e467c
FN
9396 tmp = load_reg(s, rd);
9397 tmp2 = load_reg(s, rm);
72485ec4 9398 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9399 tcg_temp_free_i32(tmp2);
9400 tcg_temp_free_i32(tmp);
99c475ab
FB
9401 break;
9402 case 2: /* mov/cpy */
396e467c
FN
9403 tmp = load_reg(s, rm);
9404 store_reg(s, rd, tmp);
99c475ab
FB
9405 break;
9406 case 3:/* branch [and link] exchange thumb register */
b0109805 9407 tmp = load_reg(s, rm);
99c475ab 9408 if (insn & (1 << 7)) {
be5e7a76 9409 ARCH(5);
99c475ab 9410 val = (uint32_t)s->pc | 1;
7d1b0095 9411 tmp2 = tcg_temp_new_i32();
b0109805
PB
9412 tcg_gen_movi_i32(tmp2, val);
9413 store_reg(s, 14, tmp2);
99c475ab 9414 }
be5e7a76 9415 /* already thumb, no need to check */
d9ba4830 9416 gen_bx(s, tmp);
99c475ab
FB
9417 break;
9418 }
9419 break;
9420 }
9421
9422 /* data processing register */
9423 rd = insn & 7;
9424 rm = (insn >> 3) & 7;
9425 op = (insn >> 6) & 0xf;
9426 if (op == 2 || op == 3 || op == 4 || op == 7) {
9427 /* the shift/rotate ops want the operands backwards */
9428 val = rm;
9429 rm = rd;
9430 rd = val;
9431 val = 1;
9432 } else {
9433 val = 0;
9434 }
9435
396e467c 9436 if (op == 9) { /* neg */
7d1b0095 9437 tmp = tcg_temp_new_i32();
396e467c
FN
9438 tcg_gen_movi_i32(tmp, 0);
9439 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9440 tmp = load_reg(s, rd);
9441 } else {
39d5492a 9442 TCGV_UNUSED_I32(tmp);
396e467c 9443 }
99c475ab 9444
396e467c 9445 tmp2 = load_reg(s, rm);
5899f386 9446 switch (op) {
99c475ab 9447 case 0x0: /* and */
396e467c 9448 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9449 if (!s->condexec_mask)
396e467c 9450 gen_logic_CC(tmp);
99c475ab
FB
9451 break;
9452 case 0x1: /* eor */
396e467c 9453 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9454 if (!s->condexec_mask)
396e467c 9455 gen_logic_CC(tmp);
99c475ab
FB
9456 break;
9457 case 0x2: /* lsl */
9ee6e8bb 9458 if (s->condexec_mask) {
365af80e 9459 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9460 } else {
9ef39277 9461 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9462 gen_logic_CC(tmp2);
9ee6e8bb 9463 }
99c475ab
FB
9464 break;
9465 case 0x3: /* lsr */
9ee6e8bb 9466 if (s->condexec_mask) {
365af80e 9467 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9468 } else {
9ef39277 9469 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9470 gen_logic_CC(tmp2);
9ee6e8bb 9471 }
99c475ab
FB
9472 break;
9473 case 0x4: /* asr */
9ee6e8bb 9474 if (s->condexec_mask) {
365af80e 9475 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9476 } else {
9ef39277 9477 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9478 gen_logic_CC(tmp2);
9ee6e8bb 9479 }
99c475ab
FB
9480 break;
9481 case 0x5: /* adc */
49b4c31e 9482 if (s->condexec_mask) {
396e467c 9483 gen_adc(tmp, tmp2);
49b4c31e
RH
9484 } else {
9485 gen_adc_CC(tmp, tmp, tmp2);
9486 }
99c475ab
FB
9487 break;
9488 case 0x6: /* sbc */
2de68a49 9489 if (s->condexec_mask) {
396e467c 9490 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9491 } else {
9492 gen_sbc_CC(tmp, tmp, tmp2);
9493 }
99c475ab
FB
9494 break;
9495 case 0x7: /* ror */
9ee6e8bb 9496 if (s->condexec_mask) {
f669df27
AJ
9497 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9498 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9499 } else {
9ef39277 9500 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9501 gen_logic_CC(tmp2);
9ee6e8bb 9502 }
99c475ab
FB
9503 break;
9504 case 0x8: /* tst */
396e467c
FN
9505 tcg_gen_and_i32(tmp, tmp, tmp2);
9506 gen_logic_CC(tmp);
99c475ab 9507 rd = 16;
5899f386 9508 break;
99c475ab 9509 case 0x9: /* neg */
9ee6e8bb 9510 if (s->condexec_mask)
396e467c 9511 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9512 else
72485ec4 9513 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9514 break;
9515 case 0xa: /* cmp */
72485ec4 9516 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9517 rd = 16;
9518 break;
9519 case 0xb: /* cmn */
72485ec4 9520 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9521 rd = 16;
9522 break;
9523 case 0xc: /* orr */
396e467c 9524 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9525 if (!s->condexec_mask)
396e467c 9526 gen_logic_CC(tmp);
99c475ab
FB
9527 break;
9528 case 0xd: /* mul */
7b2919a0 9529 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9530 if (!s->condexec_mask)
396e467c 9531 gen_logic_CC(tmp);
99c475ab
FB
9532 break;
9533 case 0xe: /* bic */
f669df27 9534 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9535 if (!s->condexec_mask)
396e467c 9536 gen_logic_CC(tmp);
99c475ab
FB
9537 break;
9538 case 0xf: /* mvn */
396e467c 9539 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9540 if (!s->condexec_mask)
396e467c 9541 gen_logic_CC(tmp2);
99c475ab 9542 val = 1;
5899f386 9543 rm = rd;
99c475ab
FB
9544 break;
9545 }
9546 if (rd != 16) {
396e467c
FN
9547 if (val) {
9548 store_reg(s, rm, tmp2);
9549 if (op != 0xf)
7d1b0095 9550 tcg_temp_free_i32(tmp);
396e467c
FN
9551 } else {
9552 store_reg(s, rd, tmp);
7d1b0095 9553 tcg_temp_free_i32(tmp2);
396e467c
FN
9554 }
9555 } else {
7d1b0095
PM
9556 tcg_temp_free_i32(tmp);
9557 tcg_temp_free_i32(tmp2);
99c475ab
FB
9558 }
9559 break;
9560
9561 case 5:
9562 /* load/store register offset. */
9563 rd = insn & 7;
9564 rn = (insn >> 3) & 7;
9565 rm = (insn >> 6) & 7;
9566 op = (insn >> 9) & 7;
b0109805 9567 addr = load_reg(s, rn);
b26eefb6 9568 tmp = load_reg(s, rm);
b0109805 9569 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9570 tcg_temp_free_i32(tmp);
99c475ab 9571
c40c8556 9572 if (op < 3) { /* store */
b0109805 9573 tmp = load_reg(s, rd);
c40c8556
PM
9574 } else {
9575 tmp = tcg_temp_new_i32();
9576 }
99c475ab
FB
9577
9578 switch (op) {
9579 case 0: /* str */
08307563 9580 gen_aa32_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9581 break;
9582 case 1: /* strh */
08307563 9583 gen_aa32_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9584 break;
9585 case 2: /* strb */
08307563 9586 gen_aa32_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9587 break;
9588 case 3: /* ldrsb */
08307563 9589 gen_aa32_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
9590 break;
9591 case 4: /* ldr */
08307563 9592 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9593 break;
9594 case 5: /* ldrh */
08307563 9595 gen_aa32_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
9596 break;
9597 case 6: /* ldrb */
08307563 9598 gen_aa32_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
9599 break;
9600 case 7: /* ldrsh */
08307563 9601 gen_aa32_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
9602 break;
9603 }
c40c8556 9604 if (op >= 3) { /* load */
b0109805 9605 store_reg(s, rd, tmp);
c40c8556
PM
9606 } else {
9607 tcg_temp_free_i32(tmp);
9608 }
7d1b0095 9609 tcg_temp_free_i32(addr);
99c475ab
FB
9610 break;
9611
9612 case 6:
9613 /* load/store word immediate offset */
9614 rd = insn & 7;
9615 rn = (insn >> 3) & 7;
b0109805 9616 addr = load_reg(s, rn);
99c475ab 9617 val = (insn >> 4) & 0x7c;
b0109805 9618 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9619
9620 if (insn & (1 << 11)) {
9621 /* load */
c40c8556 9622 tmp = tcg_temp_new_i32();
08307563 9623 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9624 store_reg(s, rd, tmp);
99c475ab
FB
9625 } else {
9626 /* store */
b0109805 9627 tmp = load_reg(s, rd);
08307563 9628 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9629 tcg_temp_free_i32(tmp);
99c475ab 9630 }
7d1b0095 9631 tcg_temp_free_i32(addr);
99c475ab
FB
9632 break;
9633
9634 case 7:
9635 /* load/store byte immediate offset */
9636 rd = insn & 7;
9637 rn = (insn >> 3) & 7;
b0109805 9638 addr = load_reg(s, rn);
99c475ab 9639 val = (insn >> 6) & 0x1f;
b0109805 9640 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9641
9642 if (insn & (1 << 11)) {
9643 /* load */
c40c8556 9644 tmp = tcg_temp_new_i32();
08307563 9645 gen_aa32_ld8u(tmp, addr, IS_USER(s));
b0109805 9646 store_reg(s, rd, tmp);
99c475ab
FB
9647 } else {
9648 /* store */
b0109805 9649 tmp = load_reg(s, rd);
08307563 9650 gen_aa32_st8(tmp, addr, IS_USER(s));
c40c8556 9651 tcg_temp_free_i32(tmp);
99c475ab 9652 }
7d1b0095 9653 tcg_temp_free_i32(addr);
99c475ab
FB
9654 break;
9655
9656 case 8:
9657 /* load/store halfword immediate offset */
9658 rd = insn & 7;
9659 rn = (insn >> 3) & 7;
b0109805 9660 addr = load_reg(s, rn);
99c475ab 9661 val = (insn >> 5) & 0x3e;
b0109805 9662 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9663
9664 if (insn & (1 << 11)) {
9665 /* load */
c40c8556 9666 tmp = tcg_temp_new_i32();
08307563 9667 gen_aa32_ld16u(tmp, addr, IS_USER(s));
b0109805 9668 store_reg(s, rd, tmp);
99c475ab
FB
9669 } else {
9670 /* store */
b0109805 9671 tmp = load_reg(s, rd);
08307563 9672 gen_aa32_st16(tmp, addr, IS_USER(s));
c40c8556 9673 tcg_temp_free_i32(tmp);
99c475ab 9674 }
7d1b0095 9675 tcg_temp_free_i32(addr);
99c475ab
FB
9676 break;
9677
9678 case 9:
9679 /* load/store from stack */
9680 rd = (insn >> 8) & 7;
b0109805 9681 addr = load_reg(s, 13);
99c475ab 9682 val = (insn & 0xff) * 4;
b0109805 9683 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9684
9685 if (insn & (1 << 11)) {
9686 /* load */
c40c8556 9687 tmp = tcg_temp_new_i32();
08307563 9688 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9689 store_reg(s, rd, tmp);
99c475ab
FB
9690 } else {
9691 /* store */
b0109805 9692 tmp = load_reg(s, rd);
08307563 9693 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9694 tcg_temp_free_i32(tmp);
99c475ab 9695 }
7d1b0095 9696 tcg_temp_free_i32(addr);
99c475ab
FB
9697 break;
9698
9699 case 10:
9700 /* add to high reg */
9701 rd = (insn >> 8) & 7;
5899f386
FB
9702 if (insn & (1 << 11)) {
9703 /* SP */
5e3f878a 9704 tmp = load_reg(s, 13);
5899f386
FB
9705 } else {
9706 /* PC. bit 1 is ignored. */
7d1b0095 9707 tmp = tcg_temp_new_i32();
5e3f878a 9708 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9709 }
99c475ab 9710 val = (insn & 0xff) * 4;
5e3f878a
PB
9711 tcg_gen_addi_i32(tmp, tmp, val);
9712 store_reg(s, rd, tmp);
99c475ab
FB
9713 break;
9714
9715 case 11:
9716 /* misc */
9717 op = (insn >> 8) & 0xf;
9718 switch (op) {
9719 case 0:
9720 /* adjust stack pointer */
b26eefb6 9721 tmp = load_reg(s, 13);
99c475ab
FB
9722 val = (insn & 0x7f) * 4;
9723 if (insn & (1 << 7))
6a0d8a1d 9724 val = -(int32_t)val;
b26eefb6
PB
9725 tcg_gen_addi_i32(tmp, tmp, val);
9726 store_reg(s, 13, tmp);
99c475ab
FB
9727 break;
9728
9ee6e8bb
PB
9729 case 2: /* sign/zero extend. */
9730 ARCH(6);
9731 rd = insn & 7;
9732 rm = (insn >> 3) & 7;
b0109805 9733 tmp = load_reg(s, rm);
9ee6e8bb 9734 switch ((insn >> 6) & 3) {
b0109805
PB
9735 case 0: gen_sxth(tmp); break;
9736 case 1: gen_sxtb(tmp); break;
9737 case 2: gen_uxth(tmp); break;
9738 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9739 }
b0109805 9740 store_reg(s, rd, tmp);
9ee6e8bb 9741 break;
99c475ab
FB
9742 case 4: case 5: case 0xc: case 0xd:
9743 /* push/pop */
b0109805 9744 addr = load_reg(s, 13);
5899f386
FB
9745 if (insn & (1 << 8))
9746 offset = 4;
99c475ab 9747 else
5899f386
FB
9748 offset = 0;
9749 for (i = 0; i < 8; i++) {
9750 if (insn & (1 << i))
9751 offset += 4;
9752 }
9753 if ((insn & (1 << 11)) == 0) {
b0109805 9754 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9755 }
99c475ab
FB
9756 for (i = 0; i < 8; i++) {
9757 if (insn & (1 << i)) {
9758 if (insn & (1 << 11)) {
9759 /* pop */
c40c8556 9760 tmp = tcg_temp_new_i32();
08307563 9761 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9762 store_reg(s, i, tmp);
99c475ab
FB
9763 } else {
9764 /* push */
b0109805 9765 tmp = load_reg(s, i);
08307563 9766 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9767 tcg_temp_free_i32(tmp);
99c475ab 9768 }
5899f386 9769 /* advance to the next address. */
b0109805 9770 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9771 }
9772 }
39d5492a 9773 TCGV_UNUSED_I32(tmp);
99c475ab
FB
9774 if (insn & (1 << 8)) {
9775 if (insn & (1 << 11)) {
9776 /* pop pc */
c40c8556 9777 tmp = tcg_temp_new_i32();
08307563 9778 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9779 /* don't set the pc until the rest of the instruction
9780 has completed */
9781 } else {
9782 /* push lr */
b0109805 9783 tmp = load_reg(s, 14);
08307563 9784 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9785 tcg_temp_free_i32(tmp);
99c475ab 9786 }
b0109805 9787 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9788 }
5899f386 9789 if ((insn & (1 << 11)) == 0) {
b0109805 9790 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9791 }
99c475ab 9792 /* write back the new stack pointer */
b0109805 9793 store_reg(s, 13, addr);
99c475ab 9794 /* set the new PC value */
be5e7a76
DES
9795 if ((insn & 0x0900) == 0x0900) {
9796 store_reg_from_load(env, s, 15, tmp);
9797 }
99c475ab
FB
9798 break;
9799
9ee6e8bb
PB
9800 case 1: case 3: case 9: case 11: /* czb */
9801 rm = insn & 7;
d9ba4830 9802 tmp = load_reg(s, rm);
9ee6e8bb
PB
9803 s->condlabel = gen_new_label();
9804 s->condjmp = 1;
9805 if (insn & (1 << 11))
cb63669a 9806 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9807 else
cb63669a 9808 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9809 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9810 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9811 val = (uint32_t)s->pc + 2;
9812 val += offset;
9813 gen_jmp(s, val);
9814 break;
9815
9816 case 15: /* IT, nop-hint. */
9817 if ((insn & 0xf) == 0) {
9818 gen_nop_hint(s, (insn >> 4) & 0xf);
9819 break;
9820 }
9821 /* If Then. */
9822 s->condexec_cond = (insn >> 4) & 0xe;
9823 s->condexec_mask = insn & 0x1f;
9824 /* No actual code generated for this insn, just setup state. */
9825 break;
9826
06c949e6 9827 case 0xe: /* bkpt */
be5e7a76 9828 ARCH(5);
bc4a0de0 9829 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9830 break;
9831
9ee6e8bb
PB
9832 case 0xa: /* rev */
9833 ARCH(6);
9834 rn = (insn >> 3) & 0x7;
9835 rd = insn & 0x7;
b0109805 9836 tmp = load_reg(s, rn);
9ee6e8bb 9837 switch ((insn >> 6) & 3) {
66896cb8 9838 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9839 case 1: gen_rev16(tmp); break;
9840 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9841 default: goto illegal_op;
9842 }
b0109805 9843 store_reg(s, rd, tmp);
9ee6e8bb
PB
9844 break;
9845
d9e028c1
PM
9846 case 6:
9847 switch ((insn >> 5) & 7) {
9848 case 2:
9849 /* setend */
9850 ARCH(6);
10962fd5
PM
9851 if (((insn >> 3) & 1) != s->bswap_code) {
9852 /* Dynamic endianness switching not implemented. */
e0c270d9 9853 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
9854 goto illegal_op;
9855 }
9ee6e8bb 9856 break;
d9e028c1
PM
9857 case 3:
9858 /* cps */
9859 ARCH(6);
9860 if (IS_USER(s)) {
9861 break;
8984bd2e 9862 }
d9e028c1
PM
9863 if (IS_M(env)) {
9864 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9865 /* FAULTMASK */
9866 if (insn & 1) {
9867 addr = tcg_const_i32(19);
9868 gen_helper_v7m_msr(cpu_env, addr, tmp);
9869 tcg_temp_free_i32(addr);
9870 }
9871 /* PRIMASK */
9872 if (insn & 2) {
9873 addr = tcg_const_i32(16);
9874 gen_helper_v7m_msr(cpu_env, addr, tmp);
9875 tcg_temp_free_i32(addr);
9876 }
9877 tcg_temp_free_i32(tmp);
9878 gen_lookup_tb(s);
9879 } else {
9880 if (insn & (1 << 4)) {
9881 shift = CPSR_A | CPSR_I | CPSR_F;
9882 } else {
9883 shift = 0;
9884 }
9885 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9886 }
d9e028c1
PM
9887 break;
9888 default:
9889 goto undef;
9ee6e8bb
PB
9890 }
9891 break;
9892
99c475ab
FB
9893 default:
9894 goto undef;
9895 }
9896 break;
9897
9898 case 12:
a7d3970d 9899 {
99c475ab 9900 /* load/store multiple */
39d5492a
PM
9901 TCGv_i32 loaded_var;
9902 TCGV_UNUSED_I32(loaded_var);
99c475ab 9903 rn = (insn >> 8) & 0x7;
b0109805 9904 addr = load_reg(s, rn);
99c475ab
FB
9905 for (i = 0; i < 8; i++) {
9906 if (insn & (1 << i)) {
99c475ab
FB
9907 if (insn & (1 << 11)) {
9908 /* load */
c40c8556 9909 tmp = tcg_temp_new_i32();
08307563 9910 gen_aa32_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
9911 if (i == rn) {
9912 loaded_var = tmp;
9913 } else {
9914 store_reg(s, i, tmp);
9915 }
99c475ab
FB
9916 } else {
9917 /* store */
b0109805 9918 tmp = load_reg(s, i);
08307563 9919 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9920 tcg_temp_free_i32(tmp);
99c475ab 9921 }
5899f386 9922 /* advance to the next address */
b0109805 9923 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9924 }
9925 }
b0109805 9926 if ((insn & (1 << rn)) == 0) {
a7d3970d 9927 /* base reg not in list: base register writeback */
b0109805
PB
9928 store_reg(s, rn, addr);
9929 } else {
a7d3970d
PM
9930 /* base reg in list: if load, complete it now */
9931 if (insn & (1 << 11)) {
9932 store_reg(s, rn, loaded_var);
9933 }
7d1b0095 9934 tcg_temp_free_i32(addr);
b0109805 9935 }
99c475ab 9936 break;
a7d3970d 9937 }
99c475ab
FB
9938 case 13:
9939 /* conditional branch or swi */
9940 cond = (insn >> 8) & 0xf;
9941 if (cond == 0xe)
9942 goto undef;
9943
9944 if (cond == 0xf) {
9945 /* swi */
eaed129d 9946 gen_set_pc_im(s, s->pc);
9ee6e8bb 9947 s->is_jmp = DISAS_SWI;
99c475ab
FB
9948 break;
9949 }
9950 /* generate a conditional jump to next instruction */
e50e6a20 9951 s->condlabel = gen_new_label();
d9ba4830 9952 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9953 s->condjmp = 1;
99c475ab
FB
9954
9955 /* jump to the offset */
5899f386 9956 val = (uint32_t)s->pc + 2;
99c475ab 9957 offset = ((int32_t)insn << 24) >> 24;
5899f386 9958 val += offset << 1;
8aaca4c0 9959 gen_jmp(s, val);
99c475ab
FB
9960 break;
9961
9962 case 14:
358bf29e 9963 if (insn & (1 << 11)) {
9ee6e8bb
PB
9964 if (disas_thumb2_insn(env, s, insn))
9965 goto undef32;
358bf29e
PB
9966 break;
9967 }
9ee6e8bb 9968 /* unconditional branch */
99c475ab
FB
9969 val = (uint32_t)s->pc;
9970 offset = ((int32_t)insn << 21) >> 21;
9971 val += (offset << 1) + 2;
8aaca4c0 9972 gen_jmp(s, val);
99c475ab
FB
9973 break;
9974
9975 case 15:
9ee6e8bb 9976 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9977 goto undef32;
9ee6e8bb 9978 break;
99c475ab
FB
9979 }
9980 return;
9ee6e8bb 9981undef32:
bc4a0de0 9982 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9983 return;
9984illegal_op:
99c475ab 9985undef:
bc4a0de0 9986 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9987}
9988
2c0262af
FB
9989/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9990 basic block 'tb'. If search_pc is TRUE, also generate PC
9991 information for each intermediate instruction. */
5639c3f2 9992static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 9993 TranslationBlock *tb,
5639c3f2 9994 bool search_pc)
2c0262af 9995{
ed2803da 9996 CPUState *cs = CPU(cpu);
5639c3f2 9997 CPUARMState *env = &cpu->env;
2c0262af 9998 DisasContext dc1, *dc = &dc1;
a1d1bb31 9999 CPUBreakpoint *bp;
2c0262af
FB
10000 uint16_t *gen_opc_end;
10001 int j, lj;
0fa85d43 10002 target_ulong pc_start;
0a2461fa 10003 target_ulong next_page_start;
2e70f6ef
PB
10004 int num_insns;
10005 int max_insns;
3b46e624 10006
2c0262af 10007 /* generate intermediate code */
0fa85d43 10008 pc_start = tb->pc;
3b46e624 10009
2c0262af
FB
10010 dc->tb = tb;
10011
92414b31 10012 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10013
10014 dc->is_jmp = DISAS_NEXT;
10015 dc->pc = pc_start;
ed2803da 10016 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10017 dc->condjmp = 0;
3926cc84
AG
10018
10019 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
10020 dc->aarch64 = 1;
10021 dc->thumb = 0;
10022 dc->bswap_code = 0;
10023 dc->condexec_mask = 0;
10024 dc->condexec_cond = 0;
10025#if !defined(CONFIG_USER_ONLY)
10026 dc->user = 0;
10027#endif
10028 dc->vfp_enabled = 0;
10029 dc->vec_len = 0;
10030 dc->vec_stride = 0;
10031 } else {
10032 dc->aarch64 = 0;
10033 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10034 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10035 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10036 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 10037#if !defined(CONFIG_USER_ONLY)
3926cc84 10038 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 10039#endif
3926cc84
AG
10040 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10041 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10042 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
10043 }
a7812ae4
PB
10044 cpu_F0s = tcg_temp_new_i32();
10045 cpu_F1s = tcg_temp_new_i32();
10046 cpu_F0d = tcg_temp_new_i64();
10047 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10048 cpu_V0 = cpu_F0d;
10049 cpu_V1 = cpu_F1d;
e677137d 10050 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10051 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10052 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10053 lj = -1;
2e70f6ef
PB
10054 num_insns = 0;
10055 max_insns = tb->cflags & CF_COUNT_MASK;
10056 if (max_insns == 0)
10057 max_insns = CF_COUNT_MASK;
10058
806f352d 10059 gen_tb_start();
e12ce78d 10060
3849902c
PM
10061 tcg_clear_temp_count();
10062
e12ce78d
PM
10063 /* A note on handling of the condexec (IT) bits:
10064 *
10065 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10066 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10067 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10068 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10069 * to do it at the end of the block. (For example if we don't do this
10070 * it's hard to identify whether we can safely skip writing condexec
10071 * at the end of the TB, which we definitely want to do for the case
10072 * where a TB doesn't do anything with the IT state at all.)
10073 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10074 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10075 * This is done both for leaving the TB at the end, and for leaving
10076 * it because of an exception we know will happen, which is done in
10077 * gen_exception_insn(). The latter is necessary because we need to
10078 * leave the TB with the PC/IT state just prior to execution of the
10079 * instruction which caused the exception.
10080 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10081 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10082 * This is handled in the same way as restoration of the
10083 * PC in these situations: we will be called again with search_pc=1
10084 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10085 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10086 * this to restore the condexec bits.
e12ce78d
PM
10087 *
10088 * Note that there are no instructions which can read the condexec
10089 * bits, and none which can write non-static values to them, so
0ecb72a5 10090 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10091 * middle of a TB.
10092 */
10093
9ee6e8bb
PB
10094 /* Reset the conditional execution bits immediately. This avoids
10095 complications trying to do it at the end of the block. */
98eac7ca 10096 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10097 {
39d5492a 10098 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10099 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10100 store_cpu_field(tmp, condexec_bits);
8f01245e 10101 }
2c0262af 10102 do {
fbb4a2e3
PB
10103#ifdef CONFIG_USER_ONLY
10104 /* Intercept jump to the magic kernel page. */
14ade10f 10105 if (!dc->aarch64 && dc->pc >= 0xffff0000) {
fbb4a2e3
PB
10106 /* We always get here via a jump, so know we are not in a
10107 conditional execution block. */
10108 gen_exception(EXCP_KERNEL_TRAP);
10109 dc->is_jmp = DISAS_UPDATE;
10110 break;
10111 }
10112#else
9ee6e8bb
PB
10113 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10114 /* We always get here via a jump, so know we are not in a
10115 conditional execution block. */
d9ba4830 10116 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10117 dc->is_jmp = DISAS_UPDATE;
10118 break;
9ee6e8bb
PB
10119 }
10120#endif
10121
72cf2d4f
BS
10122 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10123 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 10124 if (bp->pc == dc->pc) {
bc4a0de0 10125 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10126 /* Advance PC so that clearing the breakpoint will
10127 invalidate this TB. */
10128 dc->pc += 2;
10129 goto done_generating;
1fddef4b
FB
10130 }
10131 }
10132 }
2c0262af 10133 if (search_pc) {
92414b31 10134 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10135 if (lj < j) {
10136 lj++;
10137 while (lj < j)
ab1103de 10138 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10139 }
25983cad 10140 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10141 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 10142 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10143 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10144 }
e50e6a20 10145
2e70f6ef
PB
10146 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10147 gen_io_start();
10148
fdefe51c 10149 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10150 tcg_gen_debug_insn_start(dc->pc);
10151 }
10152
14ade10f
AG
10153 if (dc->aarch64) {
10154 disas_a64_insn(env, dc);
10155 } else if (dc->thumb) {
9ee6e8bb
PB
10156 disas_thumb_insn(env, dc);
10157 if (dc->condexec_mask) {
10158 dc->condexec_cond = (dc->condexec_cond & 0xe)
10159 | ((dc->condexec_mask >> 4) & 1);
10160 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10161 if (dc->condexec_mask == 0) {
10162 dc->condexec_cond = 0;
10163 }
10164 }
10165 } else {
10166 disas_arm_insn(env, dc);
10167 }
e50e6a20
FB
10168
10169 if (dc->condjmp && !dc->is_jmp) {
10170 gen_set_label(dc->condlabel);
10171 dc->condjmp = 0;
10172 }
3849902c
PM
10173
10174 if (tcg_check_temp_count()) {
0a2461fa
AG
10175 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
10176 dc->pc);
3849902c
PM
10177 }
10178
aaf2d97d 10179 /* Translation stops when a conditional branch is encountered.
e50e6a20 10180 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10181 * Also stop translation when a page boundary is reached. This
bf20dc07 10182 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10183 num_insns ++;
efd7f486 10184 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 10185 !cs->singlestep_enabled &&
1b530a6d 10186 !singlestep &&
2e70f6ef
PB
10187 dc->pc < next_page_start &&
10188 num_insns < max_insns);
10189
10190 if (tb->cflags & CF_LAST_IO) {
10191 if (dc->condjmp) {
10192 /* FIXME: This can theoretically happen with self-modifying
10193 code. */
10194 cpu_abort(env, "IO on conditional branch instruction");
10195 }
10196 gen_io_end();
10197 }
9ee6e8bb 10198
b5ff1b31 10199 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10200 instruction was a conditional branch or trap, and the PC has
10201 already been written. */
ed2803da 10202 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 10203 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10204 if (dc->condjmp) {
9ee6e8bb
PB
10205 gen_set_condexec(dc);
10206 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10207 gen_exception(EXCP_SWI);
9ee6e8bb 10208 } else {
d9ba4830 10209 gen_exception(EXCP_DEBUG);
9ee6e8bb 10210 }
e50e6a20
FB
10211 gen_set_label(dc->condlabel);
10212 }
10213 if (dc->condjmp || !dc->is_jmp) {
eaed129d 10214 gen_set_pc_im(dc, dc->pc);
e50e6a20 10215 dc->condjmp = 0;
8aaca4c0 10216 }
9ee6e8bb
PB
10217 gen_set_condexec(dc);
10218 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10219 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10220 } else {
10221 /* FIXME: Single stepping a WFI insn will not halt
10222 the CPU. */
d9ba4830 10223 gen_exception(EXCP_DEBUG);
9ee6e8bb 10224 }
8aaca4c0 10225 } else {
9ee6e8bb
PB
10226 /* While branches must always occur at the end of an IT block,
10227 there are a few other things that can cause us to terminate
65626741 10228 the TB in the middle of an IT block:
9ee6e8bb
PB
10229 - Exception generating instructions (bkpt, swi, undefined).
10230 - Page boundaries.
10231 - Hardware watchpoints.
10232 Hardware breakpoints have already been handled and skip this code.
10233 */
10234 gen_set_condexec(dc);
8aaca4c0 10235 switch(dc->is_jmp) {
8aaca4c0 10236 case DISAS_NEXT:
6e256c93 10237 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10238 break;
10239 default:
10240 case DISAS_JUMP:
10241 case DISAS_UPDATE:
10242 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10243 tcg_gen_exit_tb(0);
8aaca4c0
FB
10244 break;
10245 case DISAS_TB_JUMP:
10246 /* nothing more to generate */
10247 break;
9ee6e8bb 10248 case DISAS_WFI:
1ce94f81 10249 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10250 break;
10251 case DISAS_SWI:
d9ba4830 10252 gen_exception(EXCP_SWI);
9ee6e8bb 10253 break;
8aaca4c0 10254 }
e50e6a20
FB
10255 if (dc->condjmp) {
10256 gen_set_label(dc->condlabel);
9ee6e8bb 10257 gen_set_condexec(dc);
6e256c93 10258 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10259 dc->condjmp = 0;
10260 }
2c0262af 10261 }
2e70f6ef 10262
9ee6e8bb 10263done_generating:
806f352d 10264 gen_tb_end(tb, num_insns);
efd7f486 10265 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10266
10267#ifdef DEBUG_DISAS
8fec2b8c 10268 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10269 qemu_log("----------------\n");
10270 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10271 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10272 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10273 qemu_log("\n");
2c0262af
FB
10274 }
10275#endif
b5ff1b31 10276 if (search_pc) {
92414b31 10277 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10278 lj++;
10279 while (lj <= j)
ab1103de 10280 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10281 } else {
2c0262af 10282 tb->size = dc->pc - pc_start;
2e70f6ef 10283 tb->icount = num_insns;
b5ff1b31 10284 }
2c0262af
FB
10285}
10286
0ecb72a5 10287void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10288{
5639c3f2 10289 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10290}
10291
0ecb72a5 10292void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10293{
5639c3f2 10294 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10295}
10296
b5ff1b31
FB
10297static const char *cpu_mode_names[16] = {
10298 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10299 "???", "???", "???", "und", "???", "???", "???", "sys"
10300};
9ee6e8bb 10301
878096ee
AF
10302void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10303 int flags)
2c0262af 10304{
878096ee
AF
10305 ARMCPU *cpu = ARM_CPU(cs);
10306 CPUARMState *env = &cpu->env;
2c0262af 10307 int i;
b5ff1b31 10308 uint32_t psr;
2c0262af
FB
10309
10310 for(i=0;i<16;i++) {
7fe48483 10311 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10312 if ((i % 4) == 3)
7fe48483 10313 cpu_fprintf(f, "\n");
2c0262af 10314 else
7fe48483 10315 cpu_fprintf(f, " ");
2c0262af 10316 }
b5ff1b31 10317 psr = cpsr_read(env);
687fa640
TS
10318 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10319 psr,
b5ff1b31
FB
10320 psr & (1 << 31) ? 'N' : '-',
10321 psr & (1 << 30) ? 'Z' : '-',
10322 psr & (1 << 29) ? 'C' : '-',
10323 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10324 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10325 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10326
f2617cfc
PM
10327 if (flags & CPU_DUMP_FPU) {
10328 int numvfpregs = 0;
10329 if (arm_feature(env, ARM_FEATURE_VFP)) {
10330 numvfpregs += 16;
10331 }
10332 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10333 numvfpregs += 16;
10334 }
10335 for (i = 0; i < numvfpregs; i++) {
10336 uint64_t v = float64_val(env->vfp.regs[i]);
10337 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10338 i * 2, (uint32_t)v,
10339 i * 2 + 1, (uint32_t)(v >> 32),
10340 i, v);
10341 }
10342 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10343 }
2c0262af 10344}
a6b025d3 10345
0ecb72a5 10346void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10347{
3926cc84
AG
10348 if (is_a64(env)) {
10349 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
10350 } else {
10351 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
10352 }
e12ce78d 10353 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10354}