]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
target-arm: Abstract out load/store from a vaddr in AArch32
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
534df156 31#include "qemu/bitops.h"
1497c961 32
7b59220e 33#include "helper.h"
1497c961 34#define GEN_HELPER 1
7b59220e 35#include "helper.h"
2c0262af 36
be5e7a76
DES
37#define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38#define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39/* currently all emulated v5 cores are also v5TE, so don't bother */
40#define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
9ee6e8bb
PB
41#define ENABLE_ARCH_5J 0
42#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
81e69fb0 46#define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
b5ff1b31 47
86753403 48#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 49
2c0262af
FB
50/* internal defines */
51typedef struct DisasContext {
0fa85d43 52 target_ulong pc;
2c0262af 53 int is_jmp;
e50e6a20
FB
54 /* Nonzero if this instruction has been conditionally skipped. */
55 int condjmp;
56 /* The label that will be jumped to when the instruction is skipped. */
57 int condlabel;
b90372ad 58 /* Thumb-2 conditional execution bits. */
9ee6e8bb
PB
59 int condexec_mask;
60 int condexec_cond;
2c0262af 61 struct TranslationBlock *tb;
8aaca4c0 62 int singlestep_enabled;
5899f386 63 int thumb;
d8fd2954 64 int bswap_code;
b5ff1b31
FB
65#if !defined(CONFIG_USER_ONLY)
66 int user;
67#endif
5df8bac1 68 int vfp_enabled;
69d1fc22
PM
69 int vec_len;
70 int vec_stride;
2c0262af
FB
71} DisasContext;
72
e12ce78d
PM
73static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
74
b5ff1b31
FB
75#if defined(CONFIG_USER_ONLY)
76#define IS_USER(s) 1
77#else
78#define IS_USER(s) (s->user)
79#endif
80
9ee6e8bb 81/* These instructions trap after executing, so defer them until after the
b90372ad 82 conditional execution state has been updated. */
9ee6e8bb
PB
83#define DISAS_WFI 4
84#define DISAS_SWI 5
2c0262af 85
a7812ae4 86static TCGv_ptr cpu_env;
ad69471c 87/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 88static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 89static TCGv_i32 cpu_R[16];
66c374de 90static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
91static TCGv_i32 cpu_exclusive_addr;
92static TCGv_i32 cpu_exclusive_val;
93static TCGv_i32 cpu_exclusive_high;
94#ifdef CONFIG_USER_ONLY
95static TCGv_i32 cpu_exclusive_test;
96static TCGv_i32 cpu_exclusive_info;
97#endif
ad69471c 98
b26eefb6 99/* FIXME: These should be removed. */
39d5492a 100static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 101static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 102
022c62cb 103#include "exec/gen-icount.h"
2e70f6ef 104
155c3eac
FN
105static const char *regnames[] =
106 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
107 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
108
b26eefb6
PB
109/* initialize TCG globals. */
110void arm_translate_init(void)
111{
155c3eac
FN
112 int i;
113
a7812ae4
PB
114 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
115
155c3eac
FN
116 for (i = 0; i < 16; i++) {
117 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 118 offsetof(CPUARMState, regs[i]),
155c3eac
FN
119 regnames[i]);
120 }
66c374de
AJ
121 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
122 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
123 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
124 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
125
426f5abc 126 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 127 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 128 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 129 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 130 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 131 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
132#ifdef CONFIG_USER_ONLY
133 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 134 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 135 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 136 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 137#endif
155c3eac 138
a7812ae4 139#define GEN_HELPER 2
7b59220e 140#include "helper.h"
b26eefb6
PB
141}
142
39d5492a 143static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 144{
39d5492a 145 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
146 tcg_gen_ld_i32(tmp, cpu_env, offset);
147 return tmp;
148}
149
0ecb72a5 150#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 151
39d5492a 152static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
153{
154 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 155 tcg_temp_free_i32(var);
d9ba4830
PB
156}
157
158#define store_cpu_field(var, name) \
0ecb72a5 159 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 160
b26eefb6 161/* Set a variable to the value of a CPU register. */
39d5492a 162static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
163{
164 if (reg == 15) {
165 uint32_t addr;
b90372ad 166 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
167 if (s->thumb)
168 addr = (long)s->pc + 2;
169 else
170 addr = (long)s->pc + 4;
171 tcg_gen_movi_i32(var, addr);
172 } else {
155c3eac 173 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
174 }
175}
176
177/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 178static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 179{
39d5492a 180 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
181 load_reg_var(s, tmp, reg);
182 return tmp;
183}
184
185/* Set a CPU register. The source must be a temporary and will be
186 marked as dead. */
39d5492a 187static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
188{
189 if (reg == 15) {
190 tcg_gen_andi_i32(var, var, ~1);
191 s->is_jmp = DISAS_JUMP;
192 }
155c3eac 193 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 194 tcg_temp_free_i32(var);
b26eefb6
PB
195}
196
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* The dual-halfword extensions have per-halfword behaviour, so they
 * go through helpers rather than plain TCG extension ops.
 */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 205
b26eefb6 206
39d5492a 207static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 208{
39d5492a 209 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 210 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
211 tcg_temp_free_i32(tmp_mask);
212}
d9ba4830
PB
213/* Set NZCV flags from the high 4 bits of var. */
214#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
215
216static void gen_exception(int excp)
217{
39d5492a 218 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830 219 tcg_gen_movi_i32(tmp, excp);
1ce94f81 220 gen_helper_exception(cpu_env, tmp);
7d1b0095 221 tcg_temp_free_i32(tmp);
d9ba4830
PB
222}
223
39d5492a 224static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 225{
39d5492a
PM
226 TCGv_i32 tmp1 = tcg_temp_new_i32();
227 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
228 tcg_gen_ext16s_i32(tmp1, a);
229 tcg_gen_ext16s_i32(tmp2, b);
3670669c 230 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 231 tcg_temp_free_i32(tmp2);
3670669c
PB
232 tcg_gen_sari_i32(a, a, 16);
233 tcg_gen_sari_i32(b, b, 16);
234 tcg_gen_mul_i32(b, b, a);
235 tcg_gen_mov_i32(a, tmp1);
7d1b0095 236 tcg_temp_free_i32(tmp1);
3670669c
PB
237}
238
239/* Byteswap each halfword. */
39d5492a 240static void gen_rev16(TCGv_i32 var)
3670669c 241{
39d5492a 242 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
243 tcg_gen_shri_i32(tmp, var, 8);
244 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
245 tcg_gen_shli_i32(var, var, 8);
246 tcg_gen_andi_i32(var, var, 0xff00ff00);
247 tcg_gen_or_i32(var, var, tmp);
7d1b0095 248 tcg_temp_free_i32(tmp);
3670669c
PB
249}
250
251/* Byteswap low halfword and sign extend. */
39d5492a 252static void gen_revsh(TCGv_i32 var)
3670669c 253{
1a855029
AJ
254 tcg_gen_ext16u_i32(var, var);
255 tcg_gen_bswap16_i32(var, var);
256 tcg_gen_ext16s_i32(var, var);
3670669c
PB
257}
258
259/* Unsigned bitfield extract. */
39d5492a 260static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
261{
262 if (shift)
263 tcg_gen_shri_i32(var, var, shift);
264 tcg_gen_andi_i32(var, var, mask);
265}
266
267/* Signed bitfield extract. */
39d5492a 268static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
269{
270 uint32_t signbit;
271
272 if (shift)
273 tcg_gen_sari_i32(var, var, shift);
274 if (shift + width < 32) {
275 signbit = 1u << (width - 1);
276 tcg_gen_andi_i32(var, var, (1u << width) - 1);
277 tcg_gen_xori_i32(var, var, signbit);
278 tcg_gen_subi_i32(var, var, signbit);
279 }
280}
281
838fa72d 282/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 283static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 284{
838fa72d
AJ
285 TCGv_i64 tmp64 = tcg_temp_new_i64();
286
287 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 288 tcg_temp_free_i32(b);
838fa72d
AJ
289 tcg_gen_shli_i64(tmp64, tmp64, 32);
290 tcg_gen_add_i64(a, tmp64, a);
291
292 tcg_temp_free_i64(tmp64);
293 return a;
294}
295
296/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 297static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
298{
299 TCGv_i64 tmp64 = tcg_temp_new_i64();
300
301 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 302 tcg_temp_free_i32(b);
838fa72d
AJ
303 tcg_gen_shli_i64(tmp64, tmp64, 32);
304 tcg_gen_sub_i64(a, tmp64, a);
305
306 tcg_temp_free_i64(tmp64);
307 return a;
3670669c
PB
308}
309
5e3f878a 310/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 311static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 312{
39d5492a
PM
313 TCGv_i32 lo = tcg_temp_new_i32();
314 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 315 TCGv_i64 ret;
5e3f878a 316
831d7fe8 317 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 318 tcg_temp_free_i32(a);
7d1b0095 319 tcg_temp_free_i32(b);
831d7fe8
RH
320
321 ret = tcg_temp_new_i64();
322 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
323 tcg_temp_free_i32(lo);
324 tcg_temp_free_i32(hi);
831d7fe8
RH
325
326 return ret;
5e3f878a
PB
327}
328
39d5492a 329static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 330{
39d5492a
PM
331 TCGv_i32 lo = tcg_temp_new_i32();
332 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 333 TCGv_i64 ret;
5e3f878a 334
831d7fe8 335 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 336 tcg_temp_free_i32(a);
7d1b0095 337 tcg_temp_free_i32(b);
831d7fe8
RH
338
339 ret = tcg_temp_new_i64();
340 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
341 tcg_temp_free_i32(lo);
342 tcg_temp_free_i32(hi);
831d7fe8
RH
343
344 return ret;
5e3f878a
PB
345}
346
8f01245e 347/* Swap low and high halfwords. */
39d5492a 348static void gen_swap_half(TCGv_i32 var)
8f01245e 349{
39d5492a 350 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
351 tcg_gen_shri_i32(tmp, var, 16);
352 tcg_gen_shli_i32(var, var, 16);
353 tcg_gen_or_i32(var, var, tmp);
7d1b0095 354 tcg_temp_free_i32(tmp);
8f01245e
PB
355}
356
b26eefb6
PB
357/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
358 tmp = (t0 ^ t1) & 0x8000;
359 t0 &= ~0x8000;
360 t1 &= ~0x8000;
361 t0 = (t0 + t1) ^ tmp;
362 */
363
39d5492a 364static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 365{
39d5492a 366 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
367 tcg_gen_xor_i32(tmp, t0, t1);
368 tcg_gen_andi_i32(tmp, tmp, 0x8000);
369 tcg_gen_andi_i32(t0, t0, ~0x8000);
370 tcg_gen_andi_i32(t1, t1, ~0x8000);
371 tcg_gen_add_i32(t0, t0, t1);
372 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
373 tcg_temp_free_i32(tmp);
374 tcg_temp_free_i32(t1);
b26eefb6
PB
375}
376
377/* Set CF to the top bit of var. */
39d5492a 378static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 379{
66c374de 380 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
381}
382
383/* Set N and Z flags from var. */
39d5492a 384static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 385{
66c374de
AJ
386 tcg_gen_mov_i32(cpu_NF, var);
387 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
388}
389
390/* T0 += T1 + CF. */
39d5492a 391static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 392{
396e467c 393 tcg_gen_add_i32(t0, t0, t1);
66c374de 394 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
395}
396
e9bb4aa9 397/* dest = T0 + T1 + CF. */
39d5492a 398static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 399{
e9bb4aa9 400 tcg_gen_add_i32(dest, t0, t1);
66c374de 401 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
402}
403
3670669c 404/* dest = T0 - T1 + CF - 1. */
39d5492a 405static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 406{
3670669c 407 tcg_gen_sub_i32(dest, t0, t1);
66c374de 408 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 409 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
410}
411
72485ec4 412/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 413static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 414{
39d5492a 415 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
416 tcg_gen_movi_i32(tmp, 0);
417 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 418 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 419 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
420 tcg_gen_xor_i32(tmp, t0, t1);
421 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
422 tcg_temp_free_i32(tmp);
423 tcg_gen_mov_i32(dest, cpu_NF);
424}
425
49b4c31e 426/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 427static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 428{
39d5492a 429 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
430 if (TCG_TARGET_HAS_add2_i32) {
431 tcg_gen_movi_i32(tmp, 0);
432 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 433 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
434 } else {
435 TCGv_i64 q0 = tcg_temp_new_i64();
436 TCGv_i64 q1 = tcg_temp_new_i64();
437 tcg_gen_extu_i32_i64(q0, t0);
438 tcg_gen_extu_i32_i64(q1, t1);
439 tcg_gen_add_i64(q0, q0, q1);
440 tcg_gen_extu_i32_i64(q1, cpu_CF);
441 tcg_gen_add_i64(q0, q0, q1);
442 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
443 tcg_temp_free_i64(q0);
444 tcg_temp_free_i64(q1);
445 }
446 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
447 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
448 tcg_gen_xor_i32(tmp, t0, t1);
449 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
450 tcg_temp_free_i32(tmp);
451 tcg_gen_mov_i32(dest, cpu_NF);
452}
453
72485ec4 454/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 455static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 456{
39d5492a 457 TCGv_i32 tmp;
72485ec4
AJ
458 tcg_gen_sub_i32(cpu_NF, t0, t1);
459 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
460 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
461 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
462 tmp = tcg_temp_new_i32();
463 tcg_gen_xor_i32(tmp, t0, t1);
464 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
465 tcg_temp_free_i32(tmp);
466 tcg_gen_mov_i32(dest, cpu_NF);
467}
468
e77f0832 469/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 470static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 471{
39d5492a 472 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
473 tcg_gen_not_i32(tmp, t1);
474 gen_adc_CC(dest, t0, tmp);
39d5492a 475 tcg_temp_free_i32(tmp);
2de68a49
RH
476}
477
365af80e 478#define GEN_SHIFT(name) \
39d5492a 479static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 480{ \
39d5492a 481 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
482 tmp1 = tcg_temp_new_i32(); \
483 tcg_gen_andi_i32(tmp1, t1, 0xff); \
484 tmp2 = tcg_const_i32(0); \
485 tmp3 = tcg_const_i32(0x1f); \
486 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
487 tcg_temp_free_i32(tmp3); \
488 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
489 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
490 tcg_temp_free_i32(tmp2); \
491 tcg_temp_free_i32(tmp1); \
492}
493GEN_SHIFT(shl)
494GEN_SHIFT(shr)
495#undef GEN_SHIFT
496
39d5492a 497static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 498{
39d5492a 499 TCGv_i32 tmp1, tmp2;
365af80e
AJ
500 tmp1 = tcg_temp_new_i32();
501 tcg_gen_andi_i32(tmp1, t1, 0xff);
502 tmp2 = tcg_const_i32(0x1f);
503 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
504 tcg_temp_free_i32(tmp2);
505 tcg_gen_sar_i32(dest, t0, tmp1);
506 tcg_temp_free_i32(tmp1);
507}
508
39d5492a 509static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 510{
39d5492a
PM
511 TCGv_i32 c0 = tcg_const_i32(0);
512 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
513 tcg_gen_neg_i32(tmp, src);
514 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
515 tcg_temp_free_i32(c0);
516 tcg_temp_free_i32(tmp);
517}
ad69471c 518
39d5492a 519static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 520{
9a119ff6 521 if (shift == 0) {
66c374de 522 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 523 } else {
66c374de
AJ
524 tcg_gen_shri_i32(cpu_CF, var, shift);
525 if (shift != 31) {
526 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
527 }
9a119ff6 528 }
9a119ff6 529}
b26eefb6 530
9a119ff6 531/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
532static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
533 int shift, int flags)
9a119ff6
PB
534{
535 switch (shiftop) {
536 case 0: /* LSL */
537 if (shift != 0) {
538 if (flags)
539 shifter_out_im(var, 32 - shift);
540 tcg_gen_shli_i32(var, var, shift);
541 }
542 break;
543 case 1: /* LSR */
544 if (shift == 0) {
545 if (flags) {
66c374de 546 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
547 }
548 tcg_gen_movi_i32(var, 0);
549 } else {
550 if (flags)
551 shifter_out_im(var, shift - 1);
552 tcg_gen_shri_i32(var, var, shift);
553 }
554 break;
555 case 2: /* ASR */
556 if (shift == 0)
557 shift = 32;
558 if (flags)
559 shifter_out_im(var, shift - 1);
560 if (shift == 32)
561 shift = 31;
562 tcg_gen_sari_i32(var, var, shift);
563 break;
564 case 3: /* ROR/RRX */
565 if (shift != 0) {
566 if (flags)
567 shifter_out_im(var, shift - 1);
f669df27 568 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 569 } else {
39d5492a 570 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 571 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
572 if (flags)
573 shifter_out_im(var, 0);
574 tcg_gen_shri_i32(var, var, 1);
b26eefb6 575 tcg_gen_or_i32(var, var, tmp);
7d1b0095 576 tcg_temp_free_i32(tmp);
b26eefb6
PB
577 }
578 }
579};
580
39d5492a
PM
581static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
582 TCGv_i32 shift, int flags)
8984bd2e
PB
583{
584 if (flags) {
585 switch (shiftop) {
9ef39277
BS
586 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
587 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
588 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
589 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
590 }
591 } else {
592 switch (shiftop) {
365af80e
AJ
593 case 0:
594 gen_shl(var, var, shift);
595 break;
596 case 1:
597 gen_shr(var, var, shift);
598 break;
599 case 2:
600 gen_sar(var, var, shift);
601 break;
f669df27
AJ
602 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
603 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
604 }
605 }
7d1b0095 606 tcg_temp_free_i32(shift);
8984bd2e
PB
607}
608
6ddbc6e4
PB
609#define PAS_OP(pfx) \
610 switch (op2) { \
611 case 0: gen_pas_helper(glue(pfx,add16)); break; \
612 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
613 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
614 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
615 case 4: gen_pas_helper(glue(pfx,add8)); break; \
616 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
617 }
39d5492a 618static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 619{
a7812ae4 620 TCGv_ptr tmp;
6ddbc6e4
PB
621
622 switch (op1) {
623#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
624 case 1:
a7812ae4 625 tmp = tcg_temp_new_ptr();
0ecb72a5 626 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 627 PAS_OP(s)
b75263d6 628 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
629 break;
630 case 5:
a7812ae4 631 tmp = tcg_temp_new_ptr();
0ecb72a5 632 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 633 PAS_OP(u)
b75263d6 634 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
635 break;
636#undef gen_pas_helper
637#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
638 case 2:
639 PAS_OP(q);
640 break;
641 case 3:
642 PAS_OP(sh);
643 break;
644 case 6:
645 PAS_OP(uq);
646 break;
647 case 7:
648 PAS_OP(uh);
649 break;
650#undef gen_pas_helper
651 }
652}
9ee6e8bb
PB
653#undef PAS_OP
654
6ddbc6e4
PB
655/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
656#define PAS_OP(pfx) \
ed89a2f1 657 switch (op1) { \
6ddbc6e4
PB
658 case 0: gen_pas_helper(glue(pfx,add8)); break; \
659 case 1: gen_pas_helper(glue(pfx,add16)); break; \
660 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
661 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
662 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
663 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
664 }
39d5492a 665static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 666{
a7812ae4 667 TCGv_ptr tmp;
6ddbc6e4 668
ed89a2f1 669 switch (op2) {
6ddbc6e4
PB
670#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
671 case 0:
a7812ae4 672 tmp = tcg_temp_new_ptr();
0ecb72a5 673 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 674 PAS_OP(s)
b75263d6 675 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
676 break;
677 case 4:
a7812ae4 678 tmp = tcg_temp_new_ptr();
0ecb72a5 679 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 680 PAS_OP(u)
b75263d6 681 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
682 break;
683#undef gen_pas_helper
684#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
685 case 1:
686 PAS_OP(q);
687 break;
688 case 2:
689 PAS_OP(sh);
690 break;
691 case 5:
692 PAS_OP(uq);
693 break;
694 case 6:
695 PAS_OP(uh);
696 break;
697#undef gen_pas_helper
698 }
699}
9ee6e8bb
PB
700#undef PAS_OP
701
d9ba4830
PB
702static void gen_test_cc(int cc, int label)
703{
39d5492a 704 TCGv_i32 tmp;
d9ba4830
PB
705 int inv;
706
d9ba4830
PB
707 switch (cc) {
708 case 0: /* eq: Z */
66c374de 709 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
710 break;
711 case 1: /* ne: !Z */
66c374de 712 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
713 break;
714 case 2: /* cs: C */
66c374de 715 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
716 break;
717 case 3: /* cc: !C */
66c374de 718 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
719 break;
720 case 4: /* mi: N */
66c374de 721 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
722 break;
723 case 5: /* pl: !N */
66c374de 724 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
725 break;
726 case 6: /* vs: V */
66c374de 727 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
728 break;
729 case 7: /* vc: !V */
66c374de 730 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
731 break;
732 case 8: /* hi: C && !Z */
733 inv = gen_new_label();
66c374de
AJ
734 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
735 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
736 gen_set_label(inv);
737 break;
738 case 9: /* ls: !C || Z */
66c374de
AJ
739 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
740 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
741 break;
742 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
743 tmp = tcg_temp_new_i32();
744 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 745 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 746 tcg_temp_free_i32(tmp);
d9ba4830
PB
747 break;
748 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
749 tmp = tcg_temp_new_i32();
750 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 751 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 752 tcg_temp_free_i32(tmp);
d9ba4830
PB
753 break;
754 case 12: /* gt: !Z && N == V */
755 inv = gen_new_label();
66c374de
AJ
756 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
757 tmp = tcg_temp_new_i32();
758 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 759 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 760 tcg_temp_free_i32(tmp);
d9ba4830
PB
761 gen_set_label(inv);
762 break;
763 case 13: /* le: Z || N != V */
66c374de
AJ
764 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
765 tmp = tcg_temp_new_i32();
766 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 767 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 768 tcg_temp_free_i32(tmp);
d9ba4830
PB
769 break;
770 default:
771 fprintf(stderr, "Bad condition code 0x%x\n", cc);
772 abort();
773 }
d9ba4830 774}
2c0262af 775
/* For each of the 16 ARM data-processing opcodes: 1 if it is a logical
 * op (sets N/Z from the result, C from the shifter), 0 if arithmetic
 * (sets all flags from the ALU).
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 794
d9ba4830
PB
795/* Set PC and Thumb state from an immediate address. */
796static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 797{
39d5492a 798 TCGv_i32 tmp;
99c475ab 799
b26eefb6 800 s->is_jmp = DISAS_UPDATE;
d9ba4830 801 if (s->thumb != (addr & 1)) {
7d1b0095 802 tmp = tcg_temp_new_i32();
d9ba4830 803 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 804 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 805 tcg_temp_free_i32(tmp);
d9ba4830 806 }
155c3eac 807 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
808}
809
810/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 811static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 812{
d9ba4830 813 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
814 tcg_gen_andi_i32(cpu_R[15], var, ~1);
815 tcg_gen_andi_i32(var, var, 1);
816 store_cpu_field(var, thumb);
d9ba4830
PB
817}
818
21aeb343
JR
819/* Variant of store_reg which uses branch&exchange logic when storing
820 to r15 in ARM architecture v7 and above. The source must be a temporary
821 and will be marked as dead. */
0ecb72a5 822static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
39d5492a 823 int reg, TCGv_i32 var)
21aeb343
JR
824{
825 if (reg == 15 && ENABLE_ARCH_7) {
826 gen_bx(s, var);
827 } else {
828 store_reg(s, reg, var);
829 }
830}
831
be5e7a76
DES
832/* Variant of store_reg which uses branch&exchange logic when storing
833 * to r15 in ARM architecture v5T and above. This is used for storing
834 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
835 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 836static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
39d5492a 837 int reg, TCGv_i32 var)
be5e7a76
DES
838{
839 if (reg == 15 && ENABLE_ARCH_5) {
840 gen_bx(s, var);
841 } else {
842 store_reg(s, reg, var);
843 }
844}
845
08307563
PM
846/* Abstractions of "generate code to do a guest load/store for
847 * AArch32", where a vaddr is always 32 bits (and is zero
848 * extended if we're a 64 bit core) and data is also
849 * 32 bits unless specifically doing a 64 bit access.
850 * These functions work like tcg_gen_qemu_{ld,st}* except
851 * that their arguments are TCGv_i32 rather than TCGv.
852 */
853#if TARGET_LONG_BITS == 32
854
855#define DO_GEN_LD(OP) \
856static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
857{ \
858 tcg_gen_qemu_##OP(val, addr, index); \
859}
860
861#define DO_GEN_ST(OP) \
862static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
863{ \
864 tcg_gen_qemu_##OP(val, addr, index); \
865}
866
867static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
868{
869 tcg_gen_qemu_ld64(val, addr, index);
870}
871
872static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
873{
874 tcg_gen_qemu_st64(val, addr, index);
875}
876
877#else
878
879#define DO_GEN_LD(OP) \
880static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
881{ \
882 TCGv addr64 = tcg_temp_new(); \
883 TCGv val64 = tcg_temp_new(); \
884 tcg_gen_extu_i32_i64(addr64, addr); \
885 tcg_gen_qemu_##OP(val64, addr64, index); \
886 tcg_temp_free(addr64); \
887 tcg_gen_trunc_i64_i32(val, val64); \
888 tcg_temp_free(val64); \
889}
890
891#define DO_GEN_ST(OP) \
892static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
893{ \
894 TCGv addr64 = tcg_temp_new(); \
895 TCGv val64 = tcg_temp_new(); \
896 tcg_gen_extu_i32_i64(addr64, addr); \
897 tcg_gen_extu_i32_i64(val64, val); \
898 tcg_gen_qemu_##OP(val64, addr64, index); \
899 tcg_temp_free(addr64); \
900 tcg_temp_free(val64); \
901}
902
903static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
904{
905 TCGv addr64 = tcg_temp_new();
906 tcg_gen_extu_i32_i64(addr64, addr);
907 tcg_gen_qemu_ld64(val, addr64, index);
908 tcg_temp_free(addr64);
909}
910
911static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
912{
913 TCGv addr64 = tcg_temp_new();
914 tcg_gen_extu_i32_i64(addr64, addr);
915 tcg_gen_qemu_st64(val, addr64, index);
916 tcg_temp_free(addr64);
917}
918
919#endif
920
921DO_GEN_LD(ld8s)
922DO_GEN_LD(ld8u)
923DO_GEN_LD(ld16s)
924DO_GEN_LD(ld16u)
925DO_GEN_LD(ld32u)
926DO_GEN_ST(st8)
927DO_GEN_ST(st16)
928DO_GEN_ST(st32)
929
5e3f878a
PB
930static inline void gen_set_pc_im(uint32_t val)
931{
155c3eac 932 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
933}
934
b5ff1b31
FB
935/* Force a TB lookup after an instruction that changes the CPU state. */
936static inline void gen_lookup_tb(DisasContext *s)
937{
a6445c52 938 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
939 s->is_jmp = DISAS_UPDATE;
940}
941
/* Apply the load/store offset encoded in 'insn' to the address 'var':
 * either a 12-bit immediate (bit 25 clear) or an immediate-shifted
 * register.  Bit 23 set means add the offset, clear means subtract.
 */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
969
/* Apply the halfword/doubleword-form load/store offset in 'insn' to
 * 'var': a split 8-bit immediate (bit 22 set) or a plain register.
 * 'extra' is an additional constant folded into the adjustment; bit 23
 * selects add (set) vs subtract (clear).
 */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate: low nibble plus bits [11:8] */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
997
5aaebd13
PM
998static TCGv_ptr get_fpstatus_ptr(int neon)
999{
1000 TCGv_ptr statusptr = tcg_temp_new_ptr();
1001 int offset;
1002 if (neon) {
0ecb72a5 1003 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1004 } else {
0ecb72a5 1005 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1006 }
1007 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1008 return statusptr;
1009}
1010
4373f3ce
PB
/* Two-operand VFP arithmetic: F0 := F0 <op> F1 in double or single
 * precision according to 'dp', using the VFP fp_status (not Neon's).
 */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1029
605a6aed
PM
static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}
1041
static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}
1051
4373f3ce
PB
/* F0 := |F0|, in the precision selected by 'dp'. */
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}
1059
/* F0 := -F0, in the precision selected by 'dp'. */
static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}
1067
/* F0 := sqrt(F0); the helper takes cpu_env (not a bare fp_status). */
static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}
1075
/* Compare F0 with F1 (quiet compare helper). */
static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}
1083
/* Compare F0 with F1 using the 'e' (signalling) compare helper. */
static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}
1091
/* Load the constant zero into F1 (for compare-with-zero forms). */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}
1099
5500b06c
PM
/* Integer-to-float conversions: the integer source is always cpu_F0s,
 * the result lands in F0 at the precision chosen by 'dp'.  'neon'
 * selects which fp_status the helper uses.
 */
#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF
4373f3ce 1115
5500b06c
PM
/* Float-to-integer conversions: source is F0 at precision 'dp', the
 * integer result always lands in cpu_F0s.  'neon' selects the
 * fp_status.  The 'z' variants use round-towards-zero (per helper
 * naming; implemented in the helpers, not here).
 */
#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
4373f3ce
PB
1133
/* Fixed-point <-> float conversions on F0, with a compile-time 'shift'
 * (number of fraction bits) passed to the helper as a constant temp.
 */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1156
/* Load F0 from the AArch32 address 'addr': 64-bit if 'dp', else 32-bit. */
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
    }
}
1165
/* Store F0 to the AArch32 address 'addr': 64-bit if 'dp', else 32-bit. */
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
    }
}
1174
8e96005d
FB
1175static inline long
1176vfp_reg_offset (int dp, int reg)
1177{
1178 if (dp)
1179 return offsetof(CPUARMState, vfp.regs[reg]);
1180 else if (reg & 1) {
1181 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1182 + offsetof(CPU_DoubleU, l.upper);
1183 } else {
1184 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1185 + offsetof(CPU_DoubleU, l.lower);
1186 }
1187}
9ee6e8bb
PB
1188
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  Piece n of
   register 'reg' is single-precision register 2*reg + n. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1198
/* Load one 32-bit piece of a NEON register into a fresh temp.
 * Caller owns (and must free) the returned temp.
 */
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}
1205
/* Store 'var' into one 32-bit piece of a NEON register; consumes
 * (frees) 'var'.
 */
static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}
1211
/* Load a whole 64-bit NEON/VFP D register into 'var'. */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1216
/* Store 'var' into a whole 64-bit NEON/VFP D register. */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1221
4373f3ce
PB
/* Float loads/stores are bit-identical to the integer ops of the same
 * width; alias them for readability in the VFP code below.
 */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1226
b7bcbe95
FB
/* F0 := VFP register 'reg' (double if 'dp', else single). */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1234
/* F1 := VFP register 'reg' (double if 'dp', else single). */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}
1242
/* VFP register 'reg' := F0 (double if 'dp', else single). */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}
1250
18c9b560
AZ
1251#define ARM_CP_RW_BIT (1 << 20)
1252
/* var := 64-bit iwMMXt data register wRn. */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1257
/* 64-bit iwMMXt data register wRn := var. */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
1262
/* Load 32-bit iwMMXt control register 'reg' into a fresh temp.
 * Caller owns (and must free) the returned temp.
 */
static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}
1269
/* Store 'var' into iwMMXt control register 'reg'; consumes 'var'. */
static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}
1275
/* wRn := M0 (the working iwMMXt accumulator). */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1280
/* M0 := wRn. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1285
/* M0 := M0 | wRn. */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1291
/* M0 := M0 & wRn. */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1297
/* M0 := M0 ^ wRn. */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1303
/* Generators for M0 := helper(M0, wRn).  The _ENV variants additionally
 * pass cpu_env so the helper can read/update iwMMXt state; _ENV_SIZE
 * instantiates byte/word/long ('b'/'w'/'l') forms; _ENV1 is the unary
 * M0 := helper(M0) form.
 */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1328
/* Instantiate the gen_op_iwmmxt_* wrappers for every helper used by
 * the iwMMXt decoder below.
 */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
e677137d 1385
e677137d
PB
/* Set bit 1 ("MUP", per the function name) of wCon, recording that an
 * iwMMXt data register was updated.
 */
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1393
/* Set bit 0 ("CUP", per the function name) of wCon, recording that an
 * iwMMXt control register was updated.
 */
static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}
1401
/* Compute the N/Z SIMD flags from M0 (via helper) and store them into
 * the wCASF control register.
 */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}
1408
/* M0 := M0 + (low 32 bits of wRn, zero-extended to 64 bits). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1415
39d5492a
PM
/* Compute the effective address of an iwMMXt load/store into 'dest'.
 * Offset = imm8 << 0 or << 2 (bit 8 via "(insn >> 7) & 2"); bit 23 (U)
 * selects add vs subtract.  Bit 24 (P) selects pre-indexing, bit 21 (W)
 * writeback.  Returns 1 for an invalid form (P=0, W=0, U=0), else 0.
 * NOTE(review): in the P=0, W=0, U=1 fall-through, 'dest' is never
 * written and 'tmp' is not freed -- looks suspect; confirm against the
 * iwMMXt addressing-mode spec.
 */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1450
/* Fetch the shift amount for an iwMMXt shift insn into 'dest', masked
 * with 'mask'.  Bit 8 set means the source is a wCGRn control register
 * (only wCGR0..wCGR3 are legal -- returns 1 otherwise); bit 8 clear
 * means the low 32 bits of data register wRn.  Returns 0 on success.
 */
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
1472
a1c7273b 1473/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1474 (ie. an undefined instruction). */
0ecb72a5 1475static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1476{
1477 int rd, wrd;
1478 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1479 TCGv_i32 addr;
1480 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1481
1482 if ((insn & 0x0e000e00) == 0x0c000000) {
1483 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1484 wrd = insn & 0xf;
1485 rdlo = (insn >> 12) & 0xf;
1486 rdhi = (insn >> 16) & 0xf;
1487 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1488 iwmmxt_load_reg(cpu_V0, wrd);
1489 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1490 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1491 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1492 } else { /* TMCRR */
da6b5335
FN
1493 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1494 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1495 gen_op_iwmmxt_set_mup();
1496 }
1497 return 0;
1498 }
1499
1500 wrd = (insn >> 12) & 0xf;
7d1b0095 1501 addr = tcg_temp_new_i32();
da6b5335 1502 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1503 tcg_temp_free_i32(addr);
18c9b560 1504 return 1;
da6b5335 1505 }
18c9b560
AZ
1506 if (insn & ARM_CP_RW_BIT) {
1507 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1508 tmp = tcg_temp_new_i32();
08307563 1509 gen_aa32_ld32u(tmp, addr, IS_USER(s));
da6b5335 1510 iwmmxt_store_creg(wrd, tmp);
18c9b560 1511 } else {
e677137d
PB
1512 i = 1;
1513 if (insn & (1 << 8)) {
1514 if (insn & (1 << 22)) { /* WLDRD */
08307563 1515 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1516 i = 0;
1517 } else { /* WLDRW wRd */
29531141 1518 tmp = tcg_temp_new_i32();
08307563 1519 gen_aa32_ld32u(tmp, addr, IS_USER(s));
e677137d
PB
1520 }
1521 } else {
29531141 1522 tmp = tcg_temp_new_i32();
e677137d 1523 if (insn & (1 << 22)) { /* WLDRH */
08307563 1524 gen_aa32_ld16u(tmp, addr, IS_USER(s));
e677137d 1525 } else { /* WLDRB */
08307563 1526 gen_aa32_ld8u(tmp, addr, IS_USER(s));
e677137d
PB
1527 }
1528 }
1529 if (i) {
1530 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1531 tcg_temp_free_i32(tmp);
e677137d 1532 }
18c9b560
AZ
1533 gen_op_iwmmxt_movq_wRn_M0(wrd);
1534 }
1535 } else {
1536 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1537 tmp = iwmmxt_load_creg(wrd);
08307563 1538 gen_aa32_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1539 } else {
1540 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1541 tmp = tcg_temp_new_i32();
e677137d
PB
1542 if (insn & (1 << 8)) {
1543 if (insn & (1 << 22)) { /* WSTRD */
08307563 1544 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1545 } else { /* WSTRW wRd */
1546 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1547 gen_aa32_st32(tmp, addr, IS_USER(s));
e677137d
PB
1548 }
1549 } else {
1550 if (insn & (1 << 22)) { /* WSTRH */
1551 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1552 gen_aa32_st16(tmp, addr, IS_USER(s));
e677137d
PB
1553 } else { /* WSTRB */
1554 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
08307563 1555 gen_aa32_st8(tmp, addr, IS_USER(s));
e677137d
PB
1556 }
1557 }
18c9b560 1558 }
29531141 1559 tcg_temp_free_i32(tmp);
18c9b560 1560 }
7d1b0095 1561 tcg_temp_free_i32(addr);
18c9b560
AZ
1562 return 0;
1563 }
1564
1565 if ((insn & 0x0f000000) != 0x0e000000)
1566 return 1;
1567
1568 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1569 case 0x000: /* WOR */
1570 wrd = (insn >> 12) & 0xf;
1571 rd0 = (insn >> 0) & 0xf;
1572 rd1 = (insn >> 16) & 0xf;
1573 gen_op_iwmmxt_movq_M0_wRn(rd0);
1574 gen_op_iwmmxt_orq_M0_wRn(rd1);
1575 gen_op_iwmmxt_setpsr_nz();
1576 gen_op_iwmmxt_movq_wRn_M0(wrd);
1577 gen_op_iwmmxt_set_mup();
1578 gen_op_iwmmxt_set_cup();
1579 break;
1580 case 0x011: /* TMCR */
1581 if (insn & 0xf)
1582 return 1;
1583 rd = (insn >> 12) & 0xf;
1584 wrd = (insn >> 16) & 0xf;
1585 switch (wrd) {
1586 case ARM_IWMMXT_wCID:
1587 case ARM_IWMMXT_wCASF:
1588 break;
1589 case ARM_IWMMXT_wCon:
1590 gen_op_iwmmxt_set_cup();
1591 /* Fall through. */
1592 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1593 tmp = iwmmxt_load_creg(wrd);
1594 tmp2 = load_reg(s, rd);
f669df27 1595 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1596 tcg_temp_free_i32(tmp2);
da6b5335 1597 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1598 break;
1599 case ARM_IWMMXT_wCGR0:
1600 case ARM_IWMMXT_wCGR1:
1601 case ARM_IWMMXT_wCGR2:
1602 case ARM_IWMMXT_wCGR3:
1603 gen_op_iwmmxt_set_cup();
da6b5335
FN
1604 tmp = load_reg(s, rd);
1605 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1606 break;
1607 default:
1608 return 1;
1609 }
1610 break;
1611 case 0x100: /* WXOR */
1612 wrd = (insn >> 12) & 0xf;
1613 rd0 = (insn >> 0) & 0xf;
1614 rd1 = (insn >> 16) & 0xf;
1615 gen_op_iwmmxt_movq_M0_wRn(rd0);
1616 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1617 gen_op_iwmmxt_setpsr_nz();
1618 gen_op_iwmmxt_movq_wRn_M0(wrd);
1619 gen_op_iwmmxt_set_mup();
1620 gen_op_iwmmxt_set_cup();
1621 break;
1622 case 0x111: /* TMRC */
1623 if (insn & 0xf)
1624 return 1;
1625 rd = (insn >> 12) & 0xf;
1626 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1627 tmp = iwmmxt_load_creg(wrd);
1628 store_reg(s, rd, tmp);
18c9b560
AZ
1629 break;
1630 case 0x300: /* WANDN */
1631 wrd = (insn >> 12) & 0xf;
1632 rd0 = (insn >> 0) & 0xf;
1633 rd1 = (insn >> 16) & 0xf;
1634 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1635 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1636 gen_op_iwmmxt_andq_M0_wRn(rd1);
1637 gen_op_iwmmxt_setpsr_nz();
1638 gen_op_iwmmxt_movq_wRn_M0(wrd);
1639 gen_op_iwmmxt_set_mup();
1640 gen_op_iwmmxt_set_cup();
1641 break;
1642 case 0x200: /* WAND */
1643 wrd = (insn >> 12) & 0xf;
1644 rd0 = (insn >> 0) & 0xf;
1645 rd1 = (insn >> 16) & 0xf;
1646 gen_op_iwmmxt_movq_M0_wRn(rd0);
1647 gen_op_iwmmxt_andq_M0_wRn(rd1);
1648 gen_op_iwmmxt_setpsr_nz();
1649 gen_op_iwmmxt_movq_wRn_M0(wrd);
1650 gen_op_iwmmxt_set_mup();
1651 gen_op_iwmmxt_set_cup();
1652 break;
1653 case 0x810: case 0xa10: /* WMADD */
1654 wrd = (insn >> 12) & 0xf;
1655 rd0 = (insn >> 0) & 0xf;
1656 rd1 = (insn >> 16) & 0xf;
1657 gen_op_iwmmxt_movq_M0_wRn(rd0);
1658 if (insn & (1 << 21))
1659 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1660 else
1661 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1662 gen_op_iwmmxt_movq_wRn_M0(wrd);
1663 gen_op_iwmmxt_set_mup();
1664 break;
1665 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1666 wrd = (insn >> 12) & 0xf;
1667 rd0 = (insn >> 16) & 0xf;
1668 rd1 = (insn >> 0) & 0xf;
1669 gen_op_iwmmxt_movq_M0_wRn(rd0);
1670 switch ((insn >> 22) & 3) {
1671 case 0:
1672 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1673 break;
1674 case 1:
1675 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1676 break;
1677 case 2:
1678 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1679 break;
1680 case 3:
1681 return 1;
1682 }
1683 gen_op_iwmmxt_movq_wRn_M0(wrd);
1684 gen_op_iwmmxt_set_mup();
1685 gen_op_iwmmxt_set_cup();
1686 break;
1687 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1688 wrd = (insn >> 12) & 0xf;
1689 rd0 = (insn >> 16) & 0xf;
1690 rd1 = (insn >> 0) & 0xf;
1691 gen_op_iwmmxt_movq_M0_wRn(rd0);
1692 switch ((insn >> 22) & 3) {
1693 case 0:
1694 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1695 break;
1696 case 1:
1697 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1698 break;
1699 case 2:
1700 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1701 break;
1702 case 3:
1703 return 1;
1704 }
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 gen_op_iwmmxt_set_cup();
1708 break;
1709 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1710 wrd = (insn >> 12) & 0xf;
1711 rd0 = (insn >> 16) & 0xf;
1712 rd1 = (insn >> 0) & 0xf;
1713 gen_op_iwmmxt_movq_M0_wRn(rd0);
1714 if (insn & (1 << 22))
1715 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1716 else
1717 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1718 if (!(insn & (1 << 20)))
1719 gen_op_iwmmxt_addl_M0_wRn(wrd);
1720 gen_op_iwmmxt_movq_wRn_M0(wrd);
1721 gen_op_iwmmxt_set_mup();
1722 break;
1723 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1724 wrd = (insn >> 12) & 0xf;
1725 rd0 = (insn >> 16) & 0xf;
1726 rd1 = (insn >> 0) & 0xf;
1727 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1728 if (insn & (1 << 21)) {
1729 if (insn & (1 << 20))
1730 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1731 else
1732 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1733 } else {
1734 if (insn & (1 << 20))
1735 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1736 else
1737 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1738 }
18c9b560
AZ
1739 gen_op_iwmmxt_movq_wRn_M0(wrd);
1740 gen_op_iwmmxt_set_mup();
1741 break;
1742 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1743 wrd = (insn >> 12) & 0xf;
1744 rd0 = (insn >> 16) & 0xf;
1745 rd1 = (insn >> 0) & 0xf;
1746 gen_op_iwmmxt_movq_M0_wRn(rd0);
1747 if (insn & (1 << 21))
1748 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1749 else
1750 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1751 if (!(insn & (1 << 20))) {
e677137d
PB
1752 iwmmxt_load_reg(cpu_V1, wrd);
1753 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1754 }
1755 gen_op_iwmmxt_movq_wRn_M0(wrd);
1756 gen_op_iwmmxt_set_mup();
1757 break;
1758 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1759 wrd = (insn >> 12) & 0xf;
1760 rd0 = (insn >> 16) & 0xf;
1761 rd1 = (insn >> 0) & 0xf;
1762 gen_op_iwmmxt_movq_M0_wRn(rd0);
1763 switch ((insn >> 22) & 3) {
1764 case 0:
1765 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1766 break;
1767 case 1:
1768 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1769 break;
1770 case 2:
1771 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1772 break;
1773 case 3:
1774 return 1;
1775 }
1776 gen_op_iwmmxt_movq_wRn_M0(wrd);
1777 gen_op_iwmmxt_set_mup();
1778 gen_op_iwmmxt_set_cup();
1779 break;
1780 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1781 wrd = (insn >> 12) & 0xf;
1782 rd0 = (insn >> 16) & 0xf;
1783 rd1 = (insn >> 0) & 0xf;
1784 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1785 if (insn & (1 << 22)) {
1786 if (insn & (1 << 20))
1787 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1788 else
1789 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1790 } else {
1791 if (insn & (1 << 20))
1792 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1793 else
1794 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1795 }
18c9b560
AZ
1796 gen_op_iwmmxt_movq_wRn_M0(wrd);
1797 gen_op_iwmmxt_set_mup();
1798 gen_op_iwmmxt_set_cup();
1799 break;
1800 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1801 wrd = (insn >> 12) & 0xf;
1802 rd0 = (insn >> 16) & 0xf;
1803 rd1 = (insn >> 0) & 0xf;
1804 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1805 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1806 tcg_gen_andi_i32(tmp, tmp, 7);
1807 iwmmxt_load_reg(cpu_V1, rd1);
1808 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1809 tcg_temp_free_i32(tmp);
18c9b560
AZ
1810 gen_op_iwmmxt_movq_wRn_M0(wrd);
1811 gen_op_iwmmxt_set_mup();
1812 break;
1813 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1814 if (((insn >> 6) & 3) == 3)
1815 return 1;
18c9b560
AZ
1816 rd = (insn >> 12) & 0xf;
1817 wrd = (insn >> 16) & 0xf;
da6b5335 1818 tmp = load_reg(s, rd);
18c9b560
AZ
1819 gen_op_iwmmxt_movq_M0_wRn(wrd);
1820 switch ((insn >> 6) & 3) {
1821 case 0:
da6b5335
FN
1822 tmp2 = tcg_const_i32(0xff);
1823 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1824 break;
1825 case 1:
da6b5335
FN
1826 tmp2 = tcg_const_i32(0xffff);
1827 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1828 break;
1829 case 2:
da6b5335
FN
1830 tmp2 = tcg_const_i32(0xffffffff);
1831 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1832 break;
da6b5335 1833 default:
39d5492a
PM
1834 TCGV_UNUSED_I32(tmp2);
1835 TCGV_UNUSED_I32(tmp3);
18c9b560 1836 }
da6b5335 1837 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1838 tcg_temp_free_i32(tmp3);
1839 tcg_temp_free_i32(tmp2);
7d1b0095 1840 tcg_temp_free_i32(tmp);
18c9b560
AZ
1841 gen_op_iwmmxt_movq_wRn_M0(wrd);
1842 gen_op_iwmmxt_set_mup();
1843 break;
1844 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1845 rd = (insn >> 12) & 0xf;
1846 wrd = (insn >> 16) & 0xf;
da6b5335 1847 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1848 return 1;
1849 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1850 tmp = tcg_temp_new_i32();
18c9b560
AZ
1851 switch ((insn >> 22) & 3) {
1852 case 0:
da6b5335
FN
1853 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1854 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1855 if (insn & 8) {
1856 tcg_gen_ext8s_i32(tmp, tmp);
1857 } else {
1858 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1859 }
1860 break;
1861 case 1:
da6b5335
FN
1862 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1863 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1864 if (insn & 8) {
1865 tcg_gen_ext16s_i32(tmp, tmp);
1866 } else {
1867 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1868 }
1869 break;
1870 case 2:
da6b5335
FN
1871 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1872 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1873 break;
18c9b560 1874 }
da6b5335 1875 store_reg(s, rd, tmp);
18c9b560
AZ
1876 break;
1877 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1878 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1879 return 1;
da6b5335 1880 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1881 switch ((insn >> 22) & 3) {
1882 case 0:
da6b5335 1883 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1884 break;
1885 case 1:
da6b5335 1886 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1887 break;
1888 case 2:
da6b5335 1889 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1890 break;
18c9b560 1891 }
da6b5335
FN
1892 tcg_gen_shli_i32(tmp, tmp, 28);
1893 gen_set_nzcv(tmp);
7d1b0095 1894 tcg_temp_free_i32(tmp);
18c9b560
AZ
1895 break;
1896 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1897 if (((insn >> 6) & 3) == 3)
1898 return 1;
18c9b560
AZ
1899 rd = (insn >> 12) & 0xf;
1900 wrd = (insn >> 16) & 0xf;
da6b5335 1901 tmp = load_reg(s, rd);
18c9b560
AZ
1902 switch ((insn >> 6) & 3) {
1903 case 0:
da6b5335 1904 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1905 break;
1906 case 1:
da6b5335 1907 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1908 break;
1909 case 2:
da6b5335 1910 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1911 break;
18c9b560 1912 }
7d1b0095 1913 tcg_temp_free_i32(tmp);
18c9b560
AZ
1914 gen_op_iwmmxt_movq_wRn_M0(wrd);
1915 gen_op_iwmmxt_set_mup();
1916 break;
1917 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1918 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1919 return 1;
da6b5335 1920 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1921 tmp2 = tcg_temp_new_i32();
da6b5335 1922 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1923 switch ((insn >> 22) & 3) {
1924 case 0:
1925 for (i = 0; i < 7; i ++) {
da6b5335
FN
1926 tcg_gen_shli_i32(tmp2, tmp2, 4);
1927 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1928 }
1929 break;
1930 case 1:
1931 for (i = 0; i < 3; i ++) {
da6b5335
FN
1932 tcg_gen_shli_i32(tmp2, tmp2, 8);
1933 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1934 }
1935 break;
1936 case 2:
da6b5335
FN
1937 tcg_gen_shli_i32(tmp2, tmp2, 16);
1938 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1939 break;
18c9b560 1940 }
da6b5335 1941 gen_set_nzcv(tmp);
7d1b0095
PM
1942 tcg_temp_free_i32(tmp2);
1943 tcg_temp_free_i32(tmp);
18c9b560
AZ
1944 break;
1945 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1946 wrd = (insn >> 12) & 0xf;
1947 rd0 = (insn >> 16) & 0xf;
1948 gen_op_iwmmxt_movq_M0_wRn(rd0);
1949 switch ((insn >> 22) & 3) {
1950 case 0:
e677137d 1951 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1952 break;
1953 case 1:
e677137d 1954 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1955 break;
1956 case 2:
e677137d 1957 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1958 break;
1959 case 3:
1960 return 1;
1961 }
1962 gen_op_iwmmxt_movq_wRn_M0(wrd);
1963 gen_op_iwmmxt_set_mup();
1964 break;
1965 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1966 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1967 return 1;
da6b5335 1968 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1969 tmp2 = tcg_temp_new_i32();
da6b5335 1970 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1971 switch ((insn >> 22) & 3) {
1972 case 0:
1973 for (i = 0; i < 7; i ++) {
da6b5335
FN
1974 tcg_gen_shli_i32(tmp2, tmp2, 4);
1975 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1976 }
1977 break;
1978 case 1:
1979 for (i = 0; i < 3; i ++) {
da6b5335
FN
1980 tcg_gen_shli_i32(tmp2, tmp2, 8);
1981 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1982 }
1983 break;
1984 case 2:
da6b5335
FN
1985 tcg_gen_shli_i32(tmp2, tmp2, 16);
1986 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1987 break;
18c9b560 1988 }
da6b5335 1989 gen_set_nzcv(tmp);
7d1b0095
PM
1990 tcg_temp_free_i32(tmp2);
1991 tcg_temp_free_i32(tmp);
18c9b560
AZ
1992 break;
1993 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1994 rd = (insn >> 12) & 0xf;
1995 rd0 = (insn >> 16) & 0xf;
da6b5335 1996 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1997 return 1;
1998 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1999 tmp = tcg_temp_new_i32();
18c9b560
AZ
2000 switch ((insn >> 22) & 3) {
2001 case 0:
da6b5335 2002 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2003 break;
2004 case 1:
da6b5335 2005 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2006 break;
2007 case 2:
da6b5335 2008 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2009 break;
18c9b560 2010 }
da6b5335 2011 store_reg(s, rd, tmp);
18c9b560
AZ
2012 break;
2013 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2014 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2015 wrd = (insn >> 12) & 0xf;
2016 rd0 = (insn >> 16) & 0xf;
2017 rd1 = (insn >> 0) & 0xf;
2018 gen_op_iwmmxt_movq_M0_wRn(rd0);
2019 switch ((insn >> 22) & 3) {
2020 case 0:
2021 if (insn & (1 << 21))
2022 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2023 else
2024 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2025 break;
2026 case 1:
2027 if (insn & (1 << 21))
2028 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2029 else
2030 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2031 break;
2032 case 2:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2035 else
2036 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2037 break;
2038 case 3:
2039 return 1;
2040 }
2041 gen_op_iwmmxt_movq_wRn_M0(wrd);
2042 gen_op_iwmmxt_set_mup();
2043 gen_op_iwmmxt_set_cup();
2044 break;
2045 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2046 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2047 wrd = (insn >> 12) & 0xf;
2048 rd0 = (insn >> 16) & 0xf;
2049 gen_op_iwmmxt_movq_M0_wRn(rd0);
2050 switch ((insn >> 22) & 3) {
2051 case 0:
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_unpacklsb_M0();
2054 else
2055 gen_op_iwmmxt_unpacklub_M0();
2056 break;
2057 case 1:
2058 if (insn & (1 << 21))
2059 gen_op_iwmmxt_unpacklsw_M0();
2060 else
2061 gen_op_iwmmxt_unpackluw_M0();
2062 break;
2063 case 2:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpacklsl_M0();
2066 else
2067 gen_op_iwmmxt_unpacklul_M0();
2068 break;
2069 case 3:
2070 return 1;
2071 }
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2077 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2078 wrd = (insn >> 12) & 0xf;
2079 rd0 = (insn >> 16) & 0xf;
2080 gen_op_iwmmxt_movq_M0_wRn(rd0);
2081 switch ((insn >> 22) & 3) {
2082 case 0:
2083 if (insn & (1 << 21))
2084 gen_op_iwmmxt_unpackhsb_M0();
2085 else
2086 gen_op_iwmmxt_unpackhub_M0();
2087 break;
2088 case 1:
2089 if (insn & (1 << 21))
2090 gen_op_iwmmxt_unpackhsw_M0();
2091 else
2092 gen_op_iwmmxt_unpackhuw_M0();
2093 break;
2094 case 2:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_unpackhsl_M0();
2097 else
2098 gen_op_iwmmxt_unpackhul_M0();
2099 break;
2100 case 3:
2101 return 1;
2102 }
2103 gen_op_iwmmxt_movq_wRn_M0(wrd);
2104 gen_op_iwmmxt_set_mup();
2105 gen_op_iwmmxt_set_cup();
2106 break;
2107 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2108 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2109 if (((insn >> 22) & 3) == 0)
2110 return 1;
18c9b560
AZ
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2114 tmp = tcg_temp_new_i32();
da6b5335 2115 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2116 tcg_temp_free_i32(tmp);
18c9b560 2117 return 1;
da6b5335 2118 }
18c9b560 2119 switch ((insn >> 22) & 3) {
18c9b560 2120 case 1:
477955bd 2121 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2122 break;
2123 case 2:
477955bd 2124 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2125 break;
2126 case 3:
477955bd 2127 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2128 break;
2129 }
7d1b0095 2130 tcg_temp_free_i32(tmp);
18c9b560
AZ
2131 gen_op_iwmmxt_movq_wRn_M0(wrd);
2132 gen_op_iwmmxt_set_mup();
2133 gen_op_iwmmxt_set_cup();
2134 break;
2135 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2136 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2137 if (((insn >> 22) & 3) == 0)
2138 return 1;
18c9b560
AZ
2139 wrd = (insn >> 12) & 0xf;
2140 rd0 = (insn >> 16) & 0xf;
2141 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2142 tmp = tcg_temp_new_i32();
da6b5335 2143 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2144 tcg_temp_free_i32(tmp);
18c9b560 2145 return 1;
da6b5335 2146 }
18c9b560 2147 switch ((insn >> 22) & 3) {
18c9b560 2148 case 1:
477955bd 2149 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2150 break;
2151 case 2:
477955bd 2152 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2153 break;
2154 case 3:
477955bd 2155 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2156 break;
2157 }
7d1b0095 2158 tcg_temp_free_i32(tmp);
18c9b560
AZ
2159 gen_op_iwmmxt_movq_wRn_M0(wrd);
2160 gen_op_iwmmxt_set_mup();
2161 gen_op_iwmmxt_set_cup();
2162 break;
2163 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2164 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2165 if (((insn >> 22) & 3) == 0)
2166 return 1;
18c9b560
AZ
2167 wrd = (insn >> 12) & 0xf;
2168 rd0 = (insn >> 16) & 0xf;
2169 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2170 tmp = tcg_temp_new_i32();
da6b5335 2171 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2172 tcg_temp_free_i32(tmp);
18c9b560 2173 return 1;
da6b5335 2174 }
18c9b560 2175 switch ((insn >> 22) & 3) {
18c9b560 2176 case 1:
477955bd 2177 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2178 break;
2179 case 2:
477955bd 2180 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2181 break;
2182 case 3:
477955bd 2183 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2184 break;
2185 }
7d1b0095 2186 tcg_temp_free_i32(tmp);
18c9b560
AZ
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 gen_op_iwmmxt_set_cup();
2190 break;
2191 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2192 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2193 if (((insn >> 22) & 3) == 0)
2194 return 1;
18c9b560
AZ
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2198 tmp = tcg_temp_new_i32();
18c9b560 2199 switch ((insn >> 22) & 3) {
18c9b560 2200 case 1:
da6b5335 2201 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2202 tcg_temp_free_i32(tmp);
18c9b560 2203 return 1;
da6b5335 2204 }
477955bd 2205 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2206 break;
2207 case 2:
da6b5335 2208 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2209 tcg_temp_free_i32(tmp);
18c9b560 2210 return 1;
da6b5335 2211 }
477955bd 2212 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2213 break;
2214 case 3:
da6b5335 2215 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2216 tcg_temp_free_i32(tmp);
18c9b560 2217 return 1;
da6b5335 2218 }
477955bd 2219 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2220 break;
2221 }
7d1b0095 2222 tcg_temp_free_i32(tmp);
18c9b560
AZ
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2225 gen_op_iwmmxt_set_cup();
2226 break;
2227 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2228 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2229 wrd = (insn >> 12) & 0xf;
2230 rd0 = (insn >> 16) & 0xf;
2231 rd1 = (insn >> 0) & 0xf;
2232 gen_op_iwmmxt_movq_M0_wRn(rd0);
2233 switch ((insn >> 22) & 3) {
2234 case 0:
2235 if (insn & (1 << 21))
2236 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2237 else
2238 gen_op_iwmmxt_minub_M0_wRn(rd1);
2239 break;
2240 case 1:
2241 if (insn & (1 << 21))
2242 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2243 else
2244 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2245 break;
2246 case 2:
2247 if (insn & (1 << 21))
2248 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2249 else
2250 gen_op_iwmmxt_minul_M0_wRn(rd1);
2251 break;
2252 case 3:
2253 return 1;
2254 }
2255 gen_op_iwmmxt_movq_wRn_M0(wrd);
2256 gen_op_iwmmxt_set_mup();
2257 break;
2258 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2259 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2260 wrd = (insn >> 12) & 0xf;
2261 rd0 = (insn >> 16) & 0xf;
2262 rd1 = (insn >> 0) & 0xf;
2263 gen_op_iwmmxt_movq_M0_wRn(rd0);
2264 switch ((insn >> 22) & 3) {
2265 case 0:
2266 if (insn & (1 << 21))
2267 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2268 else
2269 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2270 break;
2271 case 1:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2276 break;
2277 case 2:
2278 if (insn & (1 << 21))
2279 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2280 else
2281 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2282 break;
2283 case 3:
2284 return 1;
2285 }
2286 gen_op_iwmmxt_movq_wRn_M0(wrd);
2287 gen_op_iwmmxt_set_mup();
2288 break;
2289 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2290 case 0x402: case 0x502: case 0x602: case 0x702:
2291 wrd = (insn >> 12) & 0xf;
2292 rd0 = (insn >> 16) & 0xf;
2293 rd1 = (insn >> 0) & 0xf;
2294 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2295 tmp = tcg_const_i32((insn >> 20) & 3);
2296 iwmmxt_load_reg(cpu_V1, rd1);
2297 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2298 tcg_temp_free_i32(tmp);
18c9b560
AZ
2299 gen_op_iwmmxt_movq_wRn_M0(wrd);
2300 gen_op_iwmmxt_set_mup();
2301 break;
2302 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2303 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2304 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2305 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2306 wrd = (insn >> 12) & 0xf;
2307 rd0 = (insn >> 16) & 0xf;
2308 rd1 = (insn >> 0) & 0xf;
2309 gen_op_iwmmxt_movq_M0_wRn(rd0);
2310 switch ((insn >> 20) & 0xf) {
2311 case 0x0:
2312 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2313 break;
2314 case 0x1:
2315 gen_op_iwmmxt_subub_M0_wRn(rd1);
2316 break;
2317 case 0x3:
2318 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2319 break;
2320 case 0x4:
2321 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2322 break;
2323 case 0x5:
2324 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2325 break;
2326 case 0x7:
2327 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2328 break;
2329 case 0x8:
2330 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2331 break;
2332 case 0x9:
2333 gen_op_iwmmxt_subul_M0_wRn(rd1);
2334 break;
2335 case 0xb:
2336 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2337 break;
2338 default:
2339 return 1;
2340 }
2341 gen_op_iwmmxt_movq_wRn_M0(wrd);
2342 gen_op_iwmmxt_set_mup();
2343 gen_op_iwmmxt_set_cup();
2344 break;
2345 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2346 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2347 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2348 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2349 wrd = (insn >> 12) & 0xf;
2350 rd0 = (insn >> 16) & 0xf;
2351 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2352 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2353 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2354 tcg_temp_free_i32(tmp);
18c9b560
AZ
2355 gen_op_iwmmxt_movq_wRn_M0(wrd);
2356 gen_op_iwmmxt_set_mup();
2357 gen_op_iwmmxt_set_cup();
2358 break;
2359 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2360 case 0x418: case 0x518: case 0x618: case 0x718:
2361 case 0x818: case 0x918: case 0xa18: case 0xb18:
2362 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2363 wrd = (insn >> 12) & 0xf;
2364 rd0 = (insn >> 16) & 0xf;
2365 rd1 = (insn >> 0) & 0xf;
2366 gen_op_iwmmxt_movq_M0_wRn(rd0);
2367 switch ((insn >> 20) & 0xf) {
2368 case 0x0:
2369 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2370 break;
2371 case 0x1:
2372 gen_op_iwmmxt_addub_M0_wRn(rd1);
2373 break;
2374 case 0x3:
2375 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2376 break;
2377 case 0x4:
2378 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2379 break;
2380 case 0x5:
2381 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2382 break;
2383 case 0x7:
2384 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2385 break;
2386 case 0x8:
2387 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2388 break;
2389 case 0x9:
2390 gen_op_iwmmxt_addul_M0_wRn(rd1);
2391 break;
2392 case 0xb:
2393 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2394 break;
2395 default:
2396 return 1;
2397 }
2398 gen_op_iwmmxt_movq_wRn_M0(wrd);
2399 gen_op_iwmmxt_set_mup();
2400 gen_op_iwmmxt_set_cup();
2401 break;
2402 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2403 case 0x408: case 0x508: case 0x608: case 0x708:
2404 case 0x808: case 0x908: case 0xa08: case 0xb08:
2405 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2406 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2407 return 1;
18c9b560
AZ
2408 wrd = (insn >> 12) & 0xf;
2409 rd0 = (insn >> 16) & 0xf;
2410 rd1 = (insn >> 0) & 0xf;
2411 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2412 switch ((insn >> 22) & 3) {
18c9b560
AZ
2413 case 1:
2414 if (insn & (1 << 21))
2415 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2416 else
2417 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2418 break;
2419 case 2:
2420 if (insn & (1 << 21))
2421 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2422 else
2423 gen_op_iwmmxt_packul_M0_wRn(rd1);
2424 break;
2425 case 3:
2426 if (insn & (1 << 21))
2427 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2428 else
2429 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2430 break;
2431 }
2432 gen_op_iwmmxt_movq_wRn_M0(wrd);
2433 gen_op_iwmmxt_set_mup();
2434 gen_op_iwmmxt_set_cup();
2435 break;
2436 case 0x201: case 0x203: case 0x205: case 0x207:
2437 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2438 case 0x211: case 0x213: case 0x215: case 0x217:
2439 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2440 wrd = (insn >> 5) & 0xf;
2441 rd0 = (insn >> 12) & 0xf;
2442 rd1 = (insn >> 0) & 0xf;
2443 if (rd0 == 0xf || rd1 == 0xf)
2444 return 1;
2445 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2446 tmp = load_reg(s, rd0);
2447 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2448 switch ((insn >> 16) & 0xf) {
2449 case 0x0: /* TMIA */
da6b5335 2450 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2451 break;
2452 case 0x8: /* TMIAPH */
da6b5335 2453 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2454 break;
2455 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2456 if (insn & (1 << 16))
da6b5335 2457 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2458 if (insn & (1 << 17))
da6b5335
FN
2459 tcg_gen_shri_i32(tmp2, tmp2, 16);
2460 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2461 break;
2462 default:
7d1b0095
PM
2463 tcg_temp_free_i32(tmp2);
2464 tcg_temp_free_i32(tmp);
18c9b560
AZ
2465 return 1;
2466 }
7d1b0095
PM
2467 tcg_temp_free_i32(tmp2);
2468 tcg_temp_free_i32(tmp);
18c9b560
AZ
2469 gen_op_iwmmxt_movq_wRn_M0(wrd);
2470 gen_op_iwmmxt_set_mup();
2471 break;
2472 default:
2473 return 1;
2474 }
2475
2476 return 0;
2477}
2478
a1c7273b 2479/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2480 (ie. an undefined instruction). */
0ecb72a5 2481static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2482{
2483 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2484 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2485
2486 if ((insn & 0x0ff00f10) == 0x0e200010) {
2487 /* Multiply with Internal Accumulate Format */
2488 rd0 = (insn >> 12) & 0xf;
2489 rd1 = insn & 0xf;
2490 acc = (insn >> 5) & 7;
2491
2492 if (acc != 0)
2493 return 1;
2494
3a554c0f
FN
2495 tmp = load_reg(s, rd0);
2496 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2497 switch ((insn >> 16) & 0xf) {
2498 case 0x0: /* MIA */
3a554c0f 2499 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2500 break;
2501 case 0x8: /* MIAPH */
3a554c0f 2502 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2503 break;
2504 case 0xc: /* MIABB */
2505 case 0xd: /* MIABT */
2506 case 0xe: /* MIATB */
2507 case 0xf: /* MIATT */
18c9b560 2508 if (insn & (1 << 16))
3a554c0f 2509 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2510 if (insn & (1 << 17))
3a554c0f
FN
2511 tcg_gen_shri_i32(tmp2, tmp2, 16);
2512 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2513 break;
2514 default:
2515 return 1;
2516 }
7d1b0095
PM
2517 tcg_temp_free_i32(tmp2);
2518 tcg_temp_free_i32(tmp);
18c9b560
AZ
2519
2520 gen_op_iwmmxt_movq_wRn_M0(acc);
2521 return 0;
2522 }
2523
2524 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2525 /* Internal Accumulator Access Format */
2526 rdhi = (insn >> 16) & 0xf;
2527 rdlo = (insn >> 12) & 0xf;
2528 acc = insn & 7;
2529
2530 if (acc != 0)
2531 return 1;
2532
2533 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2534 iwmmxt_load_reg(cpu_V0, acc);
2535 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2536 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2537 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2538 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2539 } else { /* MAR */
3a554c0f
FN
2540 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2541 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2542 }
2543 return 0;
2544 }
2545
2546 return 1;
2547}
2548
9ee6e8bb
PB
/* Shift x right by n bits, treating a negative count as a left shift.
   Lets VFP_SREG pass (bigbit - 1) even when bigbit is 0.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number from insn: four bits at
   'bigbit' form bits [4:1] and the bit at 'smallbit' is bit 0.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number from insn into 'reg'.
   On VFP3 'smallbit' supplies the fifth (high) register bit; pre-VFP3
   cores have only 16 D registers, so a set smallbit is UNDEF and makes
   the enclosing function return 1.  NB: evaluates 'env' and may return
   from the caller -- usable only inside the disas_* functions.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination (D), first source (N) and second source (M) register fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2568
4373f3ce 2569/* Move between integer and VFP cores. */
39d5492a 2570static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2571{
39d5492a 2572 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2573 tcg_gen_mov_i32(tmp, cpu_F0s);
2574 return tmp;
2575}
2576
39d5492a 2577static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2578{
2579 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2580 tcg_temp_free_i32(tmp);
4373f3ce
PB
2581}
2582
39d5492a 2583static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2584{
39d5492a 2585 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2586 if (shift)
2587 tcg_gen_shri_i32(var, var, shift);
86831435 2588 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2589 tcg_gen_shli_i32(tmp, var, 8);
2590 tcg_gen_or_i32(var, var, tmp);
2591 tcg_gen_shli_i32(tmp, var, 16);
2592 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2593 tcg_temp_free_i32(tmp);
ad69471c
PB
2594}
2595
39d5492a 2596static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2597{
39d5492a 2598 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2599 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2600 tcg_gen_shli_i32(tmp, var, 16);
2601 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2602 tcg_temp_free_i32(tmp);
ad69471c
PB
2603}
2604
39d5492a 2605static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2606{
39d5492a 2607 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2608 tcg_gen_andi_i32(var, var, 0xffff0000);
2609 tcg_gen_shri_i32(tmp, var, 16);
2610 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2611 tcg_temp_free_i32(tmp);
ad69471c
PB
2612}
2613
39d5492a 2614static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2615{
2616 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2617 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2618 switch (size) {
2619 case 0:
08307563 2620 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2621 gen_neon_dup_u8(tmp, 0);
2622 break;
2623 case 1:
08307563 2624 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2625 gen_neon_dup_low16(tmp);
2626 break;
2627 case 2:
08307563 2628 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2629 break;
2630 default: /* Avoid compiler warnings. */
2631 abort();
2632 }
2633 return tmp;
2634}
2635
a1c7273b 2636/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2637 (ie. an undefined instruction). */
0ecb72a5 2638static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2639{
2640 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2641 int dp, veclen;
39d5492a
PM
2642 TCGv_i32 addr;
2643 TCGv_i32 tmp;
2644 TCGv_i32 tmp2;
b7bcbe95 2645
40f137e1
PB
2646 if (!arm_feature(env, ARM_FEATURE_VFP))
2647 return 1;
2648
5df8bac1 2649 if (!s->vfp_enabled) {
9ee6e8bb 2650 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2651 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2652 return 1;
2653 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2654 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2655 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2656 return 1;
2657 }
b7bcbe95
FB
2658 dp = ((insn & 0xf00) == 0xb00);
2659 switch ((insn >> 24) & 0xf) {
2660 case 0xe:
2661 if (insn & (1 << 4)) {
2662 /* single register transfer */
b7bcbe95
FB
2663 rd = (insn >> 12) & 0xf;
2664 if (dp) {
9ee6e8bb
PB
2665 int size;
2666 int pass;
2667
2668 VFP_DREG_N(rn, insn);
2669 if (insn & 0xf)
b7bcbe95 2670 return 1;
9ee6e8bb
PB
2671 if (insn & 0x00c00060
2672 && !arm_feature(env, ARM_FEATURE_NEON))
2673 return 1;
2674
2675 pass = (insn >> 21) & 1;
2676 if (insn & (1 << 22)) {
2677 size = 0;
2678 offset = ((insn >> 5) & 3) * 8;
2679 } else if (insn & (1 << 5)) {
2680 size = 1;
2681 offset = (insn & (1 << 6)) ? 16 : 0;
2682 } else {
2683 size = 2;
2684 offset = 0;
2685 }
18c9b560 2686 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2687 /* vfp->arm */
ad69471c 2688 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2689 switch (size) {
2690 case 0:
9ee6e8bb 2691 if (offset)
ad69471c 2692 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2693 if (insn & (1 << 23))
ad69471c 2694 gen_uxtb(tmp);
9ee6e8bb 2695 else
ad69471c 2696 gen_sxtb(tmp);
9ee6e8bb
PB
2697 break;
2698 case 1:
9ee6e8bb
PB
2699 if (insn & (1 << 23)) {
2700 if (offset) {
ad69471c 2701 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2702 } else {
ad69471c 2703 gen_uxth(tmp);
9ee6e8bb
PB
2704 }
2705 } else {
2706 if (offset) {
ad69471c 2707 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2708 } else {
ad69471c 2709 gen_sxth(tmp);
9ee6e8bb
PB
2710 }
2711 }
2712 break;
2713 case 2:
9ee6e8bb
PB
2714 break;
2715 }
ad69471c 2716 store_reg(s, rd, tmp);
b7bcbe95
FB
2717 } else {
2718 /* arm->vfp */
ad69471c 2719 tmp = load_reg(s, rd);
9ee6e8bb
PB
2720 if (insn & (1 << 23)) {
2721 /* VDUP */
2722 if (size == 0) {
ad69471c 2723 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2724 } else if (size == 1) {
ad69471c 2725 gen_neon_dup_low16(tmp);
9ee6e8bb 2726 }
cbbccffc 2727 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2728 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2729 tcg_gen_mov_i32(tmp2, tmp);
2730 neon_store_reg(rn, n, tmp2);
2731 }
2732 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2733 } else {
2734 /* VMOV */
2735 switch (size) {
2736 case 0:
ad69471c 2737 tmp2 = neon_load_reg(rn, pass);
d593c48e 2738 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2739 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2740 break;
2741 case 1:
ad69471c 2742 tmp2 = neon_load_reg(rn, pass);
d593c48e 2743 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2744 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2745 break;
2746 case 2:
9ee6e8bb
PB
2747 break;
2748 }
ad69471c 2749 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2750 }
b7bcbe95 2751 }
9ee6e8bb
PB
2752 } else { /* !dp */
2753 if ((insn & 0x6f) != 0x00)
2754 return 1;
2755 rn = VFP_SREG_N(insn);
18c9b560 2756 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2757 /* vfp->arm */
2758 if (insn & (1 << 21)) {
2759 /* system register */
40f137e1 2760 rn >>= 1;
9ee6e8bb 2761
b7bcbe95 2762 switch (rn) {
40f137e1 2763 case ARM_VFP_FPSID:
4373f3ce 2764 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2765 VFP3 restricts all id registers to privileged
2766 accesses. */
2767 if (IS_USER(s)
2768 && arm_feature(env, ARM_FEATURE_VFP3))
2769 return 1;
4373f3ce 2770 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2771 break;
40f137e1 2772 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2773 if (IS_USER(s))
2774 return 1;
4373f3ce 2775 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2776 break;
40f137e1
PB
2777 case ARM_VFP_FPINST:
2778 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2779 /* Not present in VFP3. */
2780 if (IS_USER(s)
2781 || arm_feature(env, ARM_FEATURE_VFP3))
2782 return 1;
4373f3ce 2783 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2784 break;
40f137e1 2785 case ARM_VFP_FPSCR:
601d70b9 2786 if (rd == 15) {
4373f3ce
PB
2787 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2788 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2789 } else {
7d1b0095 2790 tmp = tcg_temp_new_i32();
4373f3ce
PB
2791 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2792 }
b7bcbe95 2793 break;
9ee6e8bb
PB
2794 case ARM_VFP_MVFR0:
2795 case ARM_VFP_MVFR1:
2796 if (IS_USER(s)
06ed5d66 2797 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2798 return 1;
4373f3ce 2799 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2800 break;
b7bcbe95
FB
2801 default:
2802 return 1;
2803 }
2804 } else {
2805 gen_mov_F0_vreg(0, rn);
4373f3ce 2806 tmp = gen_vfp_mrs();
b7bcbe95
FB
2807 }
2808 if (rd == 15) {
b5ff1b31 2809 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2810 gen_set_nzcv(tmp);
7d1b0095 2811 tcg_temp_free_i32(tmp);
4373f3ce
PB
2812 } else {
2813 store_reg(s, rd, tmp);
2814 }
b7bcbe95
FB
2815 } else {
2816 /* arm->vfp */
b7bcbe95 2817 if (insn & (1 << 21)) {
40f137e1 2818 rn >>= 1;
b7bcbe95
FB
2819 /* system register */
2820 switch (rn) {
40f137e1 2821 case ARM_VFP_FPSID:
9ee6e8bb
PB
2822 case ARM_VFP_MVFR0:
2823 case ARM_VFP_MVFR1:
b7bcbe95
FB
2824 /* Writes are ignored. */
2825 break;
40f137e1 2826 case ARM_VFP_FPSCR:
e4c1cfa5 2827 tmp = load_reg(s, rd);
4373f3ce 2828 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2829 tcg_temp_free_i32(tmp);
b5ff1b31 2830 gen_lookup_tb(s);
b7bcbe95 2831 break;
40f137e1 2832 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2833 if (IS_USER(s))
2834 return 1;
71b3c3de
JR
2835 /* TODO: VFP subarchitecture support.
2836 * For now, keep the EN bit only */
e4c1cfa5 2837 tmp = load_reg(s, rd);
71b3c3de 2838 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2839 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2840 gen_lookup_tb(s);
2841 break;
2842 case ARM_VFP_FPINST:
2843 case ARM_VFP_FPINST2:
e4c1cfa5 2844 tmp = load_reg(s, rd);
4373f3ce 2845 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2846 break;
b7bcbe95
FB
2847 default:
2848 return 1;
2849 }
2850 } else {
e4c1cfa5 2851 tmp = load_reg(s, rd);
4373f3ce 2852 gen_vfp_msr(tmp);
b7bcbe95
FB
2853 gen_mov_vreg_F0(0, rn);
2854 }
2855 }
2856 }
2857 } else {
2858 /* data processing */
2859 /* The opcode is in bits 23, 21, 20 and 6. */
2860 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2861 if (dp) {
2862 if (op == 15) {
2863 /* rn is opcode */
2864 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2865 } else {
2866 /* rn is register number */
9ee6e8bb 2867 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2868 }
2869
04595bf6 2870 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2871 /* Integer or single precision destination. */
9ee6e8bb 2872 rd = VFP_SREG_D(insn);
b7bcbe95 2873 } else {
9ee6e8bb 2874 VFP_DREG_D(rd, insn);
b7bcbe95 2875 }
04595bf6
PM
2876 if (op == 15 &&
2877 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2878 /* VCVT from int is always from S reg regardless of dp bit.
2879 * VCVT with immediate frac_bits has same format as SREG_M
2880 */
2881 rm = VFP_SREG_M(insn);
b7bcbe95 2882 } else {
9ee6e8bb 2883 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2884 }
2885 } else {
9ee6e8bb 2886 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2887 if (op == 15 && rn == 15) {
2888 /* Double precision destination. */
9ee6e8bb
PB
2889 VFP_DREG_D(rd, insn);
2890 } else {
2891 rd = VFP_SREG_D(insn);
2892 }
04595bf6
PM
2893 /* NB that we implicitly rely on the encoding for the frac_bits
2894 * in VCVT of fixed to float being the same as that of an SREG_M
2895 */
9ee6e8bb 2896 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2897 }
2898
69d1fc22 2899 veclen = s->vec_len;
b7bcbe95
FB
2900 if (op == 15 && rn > 3)
2901 veclen = 0;
2902
2903 /* Shut up compiler warnings. */
2904 delta_m = 0;
2905 delta_d = 0;
2906 bank_mask = 0;
3b46e624 2907
b7bcbe95
FB
2908 if (veclen > 0) {
2909 if (dp)
2910 bank_mask = 0xc;
2911 else
2912 bank_mask = 0x18;
2913
2914 /* Figure out what type of vector operation this is. */
2915 if ((rd & bank_mask) == 0) {
2916 /* scalar */
2917 veclen = 0;
2918 } else {
2919 if (dp)
69d1fc22 2920 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2921 else
69d1fc22 2922 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2923
2924 if ((rm & bank_mask) == 0) {
2925 /* mixed scalar/vector */
2926 delta_m = 0;
2927 } else {
2928 /* vector */
2929 delta_m = delta_d;
2930 }
2931 }
2932 }
2933
2934 /* Load the initial operands. */
2935 if (op == 15) {
2936 switch (rn) {
2937 case 16:
2938 case 17:
2939 /* Integer source */
2940 gen_mov_F0_vreg(0, rm);
2941 break;
2942 case 8:
2943 case 9:
2944 /* Compare */
2945 gen_mov_F0_vreg(dp, rd);
2946 gen_mov_F1_vreg(dp, rm);
2947 break;
2948 case 10:
2949 case 11:
2950 /* Compare with zero */
2951 gen_mov_F0_vreg(dp, rd);
2952 gen_vfp_F1_ld0(dp);
2953 break;
9ee6e8bb
PB
2954 case 20:
2955 case 21:
2956 case 22:
2957 case 23:
644ad806
PB
2958 case 28:
2959 case 29:
2960 case 30:
2961 case 31:
9ee6e8bb
PB
2962 /* Source and destination the same. */
2963 gen_mov_F0_vreg(dp, rd);
2964 break;
6e0c0ed1
PM
2965 case 4:
2966 case 5:
2967 case 6:
2968 case 7:
2969 /* VCVTB, VCVTT: only present with the halfprec extension,
2970 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2971 */
2972 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2973 return 1;
2974 }
2975 /* Otherwise fall through */
b7bcbe95
FB
2976 default:
2977 /* One source operand. */
2978 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2979 break;
b7bcbe95
FB
2980 }
2981 } else {
2982 /* Two source operands. */
2983 gen_mov_F0_vreg(dp, rn);
2984 gen_mov_F1_vreg(dp, rm);
2985 }
2986
2987 for (;;) {
2988 /* Perform the calculation. */
2989 switch (op) {
605a6aed
PM
2990 case 0: /* VMLA: fd + (fn * fm) */
2991 /* Note that order of inputs to the add matters for NaNs */
2992 gen_vfp_F1_mul(dp);
2993 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2994 gen_vfp_add(dp);
2995 break;
605a6aed 2996 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2997 gen_vfp_mul(dp);
605a6aed
PM
2998 gen_vfp_F1_neg(dp);
2999 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
3000 gen_vfp_add(dp);
3001 break;
605a6aed
PM
3002 case 2: /* VNMLS: -fd + (fn * fm) */
3003 /* Note that it isn't valid to replace (-A + B) with (B - A)
3004 * or similar plausible looking simplifications
3005 * because this will give wrong results for NaNs.
3006 */
3007 gen_vfp_F1_mul(dp);
3008 gen_mov_F0_vreg(dp, rd);
3009 gen_vfp_neg(dp);
3010 gen_vfp_add(dp);
b7bcbe95 3011 break;
605a6aed 3012 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 3013 gen_vfp_mul(dp);
605a6aed
PM
3014 gen_vfp_F1_neg(dp);
3015 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3016 gen_vfp_neg(dp);
605a6aed 3017 gen_vfp_add(dp);
b7bcbe95
FB
3018 break;
3019 case 4: /* mul: fn * fm */
3020 gen_vfp_mul(dp);
3021 break;
3022 case 5: /* nmul: -(fn * fm) */
3023 gen_vfp_mul(dp);
3024 gen_vfp_neg(dp);
3025 break;
3026 case 6: /* add: fn + fm */
3027 gen_vfp_add(dp);
3028 break;
3029 case 7: /* sub: fn - fm */
3030 gen_vfp_sub(dp);
3031 break;
3032 case 8: /* div: fn / fm */
3033 gen_vfp_div(dp);
3034 break;
da97f52c
PM
3035 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3036 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3037 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3038 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3039 /* These are fused multiply-add, and must be done as one
3040 * floating point operation with no rounding between the
3041 * multiplication and addition steps.
3042 * NB that doing the negations here as separate steps is
3043 * correct : an input NaN should come out with its sign bit
3044 * flipped if it is a negated-input.
3045 */
3046 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3047 return 1;
3048 }
3049 if (dp) {
3050 TCGv_ptr fpst;
3051 TCGv_i64 frd;
3052 if (op & 1) {
3053 /* VFNMS, VFMS */
3054 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3055 }
3056 frd = tcg_temp_new_i64();
3057 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3058 if (op & 2) {
3059 /* VFNMA, VFNMS */
3060 gen_helper_vfp_negd(frd, frd);
3061 }
3062 fpst = get_fpstatus_ptr(0);
3063 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3064 cpu_F1d, frd, fpst);
3065 tcg_temp_free_ptr(fpst);
3066 tcg_temp_free_i64(frd);
3067 } else {
3068 TCGv_ptr fpst;
3069 TCGv_i32 frd;
3070 if (op & 1) {
3071 /* VFNMS, VFMS */
3072 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3073 }
3074 frd = tcg_temp_new_i32();
3075 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3076 if (op & 2) {
3077 gen_helper_vfp_negs(frd, frd);
3078 }
3079 fpst = get_fpstatus_ptr(0);
3080 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3081 cpu_F1s, frd, fpst);
3082 tcg_temp_free_ptr(fpst);
3083 tcg_temp_free_i32(frd);
3084 }
3085 break;
9ee6e8bb
PB
3086 case 14: /* fconst */
3087 if (!arm_feature(env, ARM_FEATURE_VFP3))
3088 return 1;
3089
3090 n = (insn << 12) & 0x80000000;
3091 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3092 if (dp) {
3093 if (i & 0x40)
3094 i |= 0x3f80;
3095 else
3096 i |= 0x4000;
3097 n |= i << 16;
4373f3ce 3098 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3099 } else {
3100 if (i & 0x40)
3101 i |= 0x780;
3102 else
3103 i |= 0x800;
3104 n |= i << 19;
5b340b51 3105 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3106 }
9ee6e8bb 3107 break;
b7bcbe95
FB
3108 case 15: /* extension space */
3109 switch (rn) {
3110 case 0: /* cpy */
3111 /* no-op */
3112 break;
3113 case 1: /* abs */
3114 gen_vfp_abs(dp);
3115 break;
3116 case 2: /* neg */
3117 gen_vfp_neg(dp);
3118 break;
3119 case 3: /* sqrt */
3120 gen_vfp_sqrt(dp);
3121 break;
60011498 3122 case 4: /* vcvtb.f32.f16 */
60011498
PB
3123 tmp = gen_vfp_mrs();
3124 tcg_gen_ext16u_i32(tmp, tmp);
3125 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3126 tcg_temp_free_i32(tmp);
60011498
PB
3127 break;
3128 case 5: /* vcvtt.f32.f16 */
60011498
PB
3129 tmp = gen_vfp_mrs();
3130 tcg_gen_shri_i32(tmp, tmp, 16);
3131 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3132 tcg_temp_free_i32(tmp);
60011498
PB
3133 break;
3134 case 6: /* vcvtb.f16.f32 */
7d1b0095 3135 tmp = tcg_temp_new_i32();
60011498
PB
3136 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3137 gen_mov_F0_vreg(0, rd);
3138 tmp2 = gen_vfp_mrs();
3139 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3140 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3141 tcg_temp_free_i32(tmp2);
60011498
PB
3142 gen_vfp_msr(tmp);
3143 break;
3144 case 7: /* vcvtt.f16.f32 */
7d1b0095 3145 tmp = tcg_temp_new_i32();
60011498
PB
3146 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3147 tcg_gen_shli_i32(tmp, tmp, 16);
3148 gen_mov_F0_vreg(0, rd);
3149 tmp2 = gen_vfp_mrs();
3150 tcg_gen_ext16u_i32(tmp2, tmp2);
3151 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3152 tcg_temp_free_i32(tmp2);
60011498
PB
3153 gen_vfp_msr(tmp);
3154 break;
b7bcbe95
FB
3155 case 8: /* cmp */
3156 gen_vfp_cmp(dp);
3157 break;
3158 case 9: /* cmpe */
3159 gen_vfp_cmpe(dp);
3160 break;
3161 case 10: /* cmpz */
3162 gen_vfp_cmp(dp);
3163 break;
3164 case 11: /* cmpez */
3165 gen_vfp_F1_ld0(dp);
3166 gen_vfp_cmpe(dp);
3167 break;
3168 case 15: /* single<->double conversion */
3169 if (dp)
4373f3ce 3170 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3171 else
4373f3ce 3172 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3173 break;
3174 case 16: /* fuito */
5500b06c 3175 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3176 break;
3177 case 17: /* fsito */
5500b06c 3178 gen_vfp_sito(dp, 0);
b7bcbe95 3179 break;
9ee6e8bb
PB
3180 case 20: /* fshto */
3181 if (!arm_feature(env, ARM_FEATURE_VFP3))
3182 return 1;
5500b06c 3183 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3184 break;
3185 case 21: /* fslto */
3186 if (!arm_feature(env, ARM_FEATURE_VFP3))
3187 return 1;
5500b06c 3188 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3189 break;
3190 case 22: /* fuhto */
3191 if (!arm_feature(env, ARM_FEATURE_VFP3))
3192 return 1;
5500b06c 3193 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3194 break;
3195 case 23: /* fulto */
3196 if (!arm_feature(env, ARM_FEATURE_VFP3))
3197 return 1;
5500b06c 3198 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3199 break;
b7bcbe95 3200 case 24: /* ftoui */
5500b06c 3201 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3202 break;
3203 case 25: /* ftouiz */
5500b06c 3204 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3205 break;
3206 case 26: /* ftosi */
5500b06c 3207 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3208 break;
3209 case 27: /* ftosiz */
5500b06c 3210 gen_vfp_tosiz(dp, 0);
b7bcbe95 3211 break;
9ee6e8bb
PB
3212 case 28: /* ftosh */
3213 if (!arm_feature(env, ARM_FEATURE_VFP3))
3214 return 1;
5500b06c 3215 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3216 break;
3217 case 29: /* ftosl */
3218 if (!arm_feature(env, ARM_FEATURE_VFP3))
3219 return 1;
5500b06c 3220 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3221 break;
3222 case 30: /* ftouh */
3223 if (!arm_feature(env, ARM_FEATURE_VFP3))
3224 return 1;
5500b06c 3225 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3226 break;
3227 case 31: /* ftoul */
3228 if (!arm_feature(env, ARM_FEATURE_VFP3))
3229 return 1;
5500b06c 3230 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3231 break;
b7bcbe95 3232 default: /* undefined */
b7bcbe95
FB
3233 return 1;
3234 }
3235 break;
3236 default: /* undefined */
b7bcbe95
FB
3237 return 1;
3238 }
3239
3240 /* Write back the result. */
3241 if (op == 15 && (rn >= 8 && rn <= 11))
3242 ; /* Comparison, do nothing. */
04595bf6
PM
3243 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3244 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3245 gen_mov_vreg_F0(0, rd);
3246 else if (op == 15 && rn == 15)
3247 /* conversion */
3248 gen_mov_vreg_F0(!dp, rd);
3249 else
3250 gen_mov_vreg_F0(dp, rd);
3251
3252 /* break out of the loop if we have finished */
3253 if (veclen == 0)
3254 break;
3255
3256 if (op == 15 && delta_m == 0) {
3257 /* single source one-many */
3258 while (veclen--) {
3259 rd = ((rd + delta_d) & (bank_mask - 1))
3260 | (rd & bank_mask);
3261 gen_mov_vreg_F0(dp, rd);
3262 }
3263 break;
3264 }
3265 /* Setup the next operands. */
3266 veclen--;
3267 rd = ((rd + delta_d) & (bank_mask - 1))
3268 | (rd & bank_mask);
3269
3270 if (op == 15) {
3271 /* One source operand. */
3272 rm = ((rm + delta_m) & (bank_mask - 1))
3273 | (rm & bank_mask);
3274 gen_mov_F0_vreg(dp, rm);
3275 } else {
3276 /* Two source operands. */
3277 rn = ((rn + delta_d) & (bank_mask - 1))
3278 | (rn & bank_mask);
3279 gen_mov_F0_vreg(dp, rn);
3280 if (delta_m) {
3281 rm = ((rm + delta_m) & (bank_mask - 1))
3282 | (rm & bank_mask);
3283 gen_mov_F1_vreg(dp, rm);
3284 }
3285 }
3286 }
3287 }
3288 break;
3289 case 0xc:
3290 case 0xd:
8387da81 3291 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3292 /* two-register transfer */
3293 rn = (insn >> 16) & 0xf;
3294 rd = (insn >> 12) & 0xf;
3295 if (dp) {
9ee6e8bb
PB
3296 VFP_DREG_M(rm, insn);
3297 } else {
3298 rm = VFP_SREG_M(insn);
3299 }
b7bcbe95 3300
18c9b560 3301 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3302 /* vfp->arm */
3303 if (dp) {
4373f3ce
PB
3304 gen_mov_F0_vreg(0, rm * 2);
3305 tmp = gen_vfp_mrs();
3306 store_reg(s, rd, tmp);
3307 gen_mov_F0_vreg(0, rm * 2 + 1);
3308 tmp = gen_vfp_mrs();
3309 store_reg(s, rn, tmp);
b7bcbe95
FB
3310 } else {
3311 gen_mov_F0_vreg(0, rm);
4373f3ce 3312 tmp = gen_vfp_mrs();
8387da81 3313 store_reg(s, rd, tmp);
b7bcbe95 3314 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3315 tmp = gen_vfp_mrs();
8387da81 3316 store_reg(s, rn, tmp);
b7bcbe95
FB
3317 }
3318 } else {
3319 /* arm->vfp */
3320 if (dp) {
4373f3ce
PB
3321 tmp = load_reg(s, rd);
3322 gen_vfp_msr(tmp);
3323 gen_mov_vreg_F0(0, rm * 2);
3324 tmp = load_reg(s, rn);
3325 gen_vfp_msr(tmp);
3326 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3327 } else {
8387da81 3328 tmp = load_reg(s, rd);
4373f3ce 3329 gen_vfp_msr(tmp);
b7bcbe95 3330 gen_mov_vreg_F0(0, rm);
8387da81 3331 tmp = load_reg(s, rn);
4373f3ce 3332 gen_vfp_msr(tmp);
b7bcbe95
FB
3333 gen_mov_vreg_F0(0, rm + 1);
3334 }
3335 }
3336 } else {
3337 /* Load/store */
3338 rn = (insn >> 16) & 0xf;
3339 if (dp)
9ee6e8bb 3340 VFP_DREG_D(rd, insn);
b7bcbe95 3341 else
9ee6e8bb 3342 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3343 if ((insn & 0x01200000) == 0x01000000) {
3344 /* Single load/store */
3345 offset = (insn & 0xff) << 2;
3346 if ((insn & (1 << 23)) == 0)
3347 offset = -offset;
934814f1
PM
3348 if (s->thumb && rn == 15) {
3349 /* This is actually UNPREDICTABLE */
3350 addr = tcg_temp_new_i32();
3351 tcg_gen_movi_i32(addr, s->pc & ~2);
3352 } else {
3353 addr = load_reg(s, rn);
3354 }
312eea9f 3355 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3356 if (insn & (1 << 20)) {
312eea9f 3357 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3358 gen_mov_vreg_F0(dp, rd);
3359 } else {
3360 gen_mov_F0_vreg(dp, rd);
312eea9f 3361 gen_vfp_st(s, dp, addr);
b7bcbe95 3362 }
7d1b0095 3363 tcg_temp_free_i32(addr);
b7bcbe95
FB
3364 } else {
3365 /* load/store multiple */
934814f1 3366 int w = insn & (1 << 21);
b7bcbe95
FB
3367 if (dp)
3368 n = (insn >> 1) & 0x7f;
3369 else
3370 n = insn & 0xff;
3371
934814f1
PM
3372 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3373 /* P == U , W == 1 => UNDEF */
3374 return 1;
3375 }
3376 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3377 /* UNPREDICTABLE cases for bad immediates: we choose to
3378 * UNDEF to avoid generating huge numbers of TCG ops
3379 */
3380 return 1;
3381 }
3382 if (rn == 15 && w) {
3383 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3384 return 1;
3385 }
3386
3387 if (s->thumb && rn == 15) {
3388 /* This is actually UNPREDICTABLE */
3389 addr = tcg_temp_new_i32();
3390 tcg_gen_movi_i32(addr, s->pc & ~2);
3391 } else {
3392 addr = load_reg(s, rn);
3393 }
b7bcbe95 3394 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3395 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3396
3397 if (dp)
3398 offset = 8;
3399 else
3400 offset = 4;
3401 for (i = 0; i < n; i++) {
18c9b560 3402 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3403 /* load */
312eea9f 3404 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3405 gen_mov_vreg_F0(dp, rd + i);
3406 } else {
3407 /* store */
3408 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3409 gen_vfp_st(s, dp, addr);
b7bcbe95 3410 }
312eea9f 3411 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3412 }
934814f1 3413 if (w) {
b7bcbe95
FB
3414 /* writeback */
3415 if (insn & (1 << 24))
3416 offset = -offset * n;
3417 else if (dp && (insn & 1))
3418 offset = 4;
3419 else
3420 offset = 0;
3421
3422 if (offset != 0)
312eea9f
FN
3423 tcg_gen_addi_i32(addr, addr, offset);
3424 store_reg(s, rn, addr);
3425 } else {
7d1b0095 3426 tcg_temp_free_i32(addr);
b7bcbe95
FB
3427 }
3428 }
3429 }
3430 break;
3431 default:
3432 /* Should never happen. */
3433 return 1;
3434 }
3435 return 0;
3436}
3437
/* Emit a jump to DEST: use a direct (chainable) TB exit when DEST lies in
 * the same guest page as the current TB, otherwise fall back to a plain
 * exit to the main loop.
 */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Same page: safe to chain directly via goto_tb slot n.  */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        /* Low bits of the exit value encode the goto_tb slot.  */
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        /* Cross-page: update PC and return to the main loop unchained.  */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
3452
8aaca4c0
FB
/* Emit an unconditional jump to DEST and end the TB.  */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
3465
39d5492a 3466static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3467{
ee097184 3468 if (x)
d9ba4830 3469 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3470 else
d9ba4830 3471 gen_sxth(t0);
ee097184 3472 if (y)
d9ba4830 3473 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3474 else
d9ba4830
PB
3475 gen_sxth(t1);
3476 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3477}
3478
3479/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3480static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3481 uint32_t mask;
3482
3483 mask = 0;
3484 if (flags & (1 << 0))
3485 mask |= 0xff;
3486 if (flags & (1 << 1))
3487 mask |= 0xff00;
3488 if (flags & (1 << 2))
3489 mask |= 0xff0000;
3490 if (flags & (1 << 3))
3491 mask |= 0xff000000;
9ee6e8bb 3492
2ae23e75 3493 /* Mask out undefined bits. */
9ee6e8bb 3494 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3495 if (!arm_feature(env, ARM_FEATURE_V4T))
3496 mask &= ~CPSR_T;
3497 if (!arm_feature(env, ARM_FEATURE_V5))
3498 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3499 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3500 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3501 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3502 mask &= ~CPSR_IT;
9ee6e8bb 3503 /* Mask out execution state bits. */
2ae23e75 3504 if (!spsr)
e160c51c 3505 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3506 /* Mask out privileged bits. */
3507 if (IS_USER(s))
9ee6e8bb 3508 mask &= CPSR_USER;
b5ff1b31
FB
3509 return mask;
3510}
3511
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write the SPSR: keep bits outside MASK, insert
         * the bits of t0 selected by MASK.
         */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    /* CPSR changes can affect how subsequent code translates.  */
    gen_lookup_tb(s);
    return 0;
}
3533
2fbac54b
FN
3534/* Returns nonzero if access to the PSR is not permitted. */
3535static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3536{
39d5492a 3537 TCGv_i32 tmp;
7d1b0095 3538 tmp = tcg_temp_new_i32();
2fbac54b
FN
3539 tcg_gen_movi_i32(tmp, val);
3540 return gen_set_psr(s, mask, spsr, tmp);
3541}
3542
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    TCGv_i32 tmp;
    store_reg(s, 15, pc);
    /* Restore the whole CPSR from the banked SPSR.  */
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    tcg_temp_free_i32(tmp);
    /* CPU state (mode, flags) changed; end the TB.  */
    s->is_jmp = DISAS_UPDATE;
}
3553
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    /* Write the saved CPSR, then the return address into PC.  */
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3562
9ee6e8bb
PB
/* Commit the translator's view of the Thumb IT-block state back to the
 * CPU state field.  Only needed when we are inside an IT block.
 */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Re-pack cond/mask into the architectural condexec encoding.  */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 3573
bc4a0de0
PM
/* Raise exception EXCP for the instruction at (s->pc - offset): sync the
 * condexec state and PC so the exception sees a consistent CPU state,
 * then emit the exception and terminate the TB.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3581
9ee6e8bb
PB
3582static void gen_nop_hint(DisasContext *s, int val)
3583{
3584 switch (val) {
3585 case 3: /* wfi */
8984bd2e 3586 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3587 s->is_jmp = DISAS_WFI;
3588 break;
3589 case 2: /* wfe */
3590 case 4: /* sev */
12b10571
MR
3591 case 5: /* sevl */
3592 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3593 default: /* nop */
3594 break;
3595 }
3596}
99c475ab 3597
ad69471c 3598#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3599
/* Element-wise Neon add, t0 += t1, for element size SIZE
 * (0 = 8-bit lanes, 1 = 16-bit lanes, 2 = single 32-bit value).
 */
static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
    case 2: tcg_gen_add_i32(t0, t0, t1); break;
    default: abort();
    }
}
3609
39d5492a 3610static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3611{
3612 switch (size) {
dd8fbd78
FN
3613 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3614 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3615 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3616 default: return;
3617 }
3618}
3619
/* 32-bit pairwise ops end up the same as the elementwise versions:
 * with one 32-bit element per register half there is nothing to pair,
 * so the scalar max/min helpers can be reused directly.
 */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3625
ad69471c
PB
/* Invoke the signed/unsigned 8/16/32-bit variant of helper NAME (which
 * takes cpu_env) on tmp/tmp2, selected by the `size` and `u` variables
 * of the enclosing scope; returns 1 (UNDEF) from the enclosing function
 * for an invalid size.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3648
/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not need cpu_env.
 * Selects the s8/u8/s16/u16/s32/u32 variant of NAME from `size` and `u`
 * in the enclosing scope; returns 1 (UNDEF) for an invalid size.
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3671
39d5492a 3672static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3673{
39d5492a 3674 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3675 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3676 return tmp;
9ee6e8bb
PB
3677}
3678
/* Spill VAR to the given VFP scratch slot.  VAR is consumed (freed).  */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
3684
/* Load a Neon scalar operand into a new 32-bit temp.  For 16-bit scalars
 * (size == 1) the selected half of the register word is duplicated into
 * both halves of the result; for 32-bit scalars the word is used as-is.
 * REG encodes both the D register and the element index.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
3700
02acedf9 3701static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3702{
39d5492a 3703 TCGv_i32 tmp, tmp2;
600b828c 3704 if (!q && size == 2) {
02acedf9
PM
3705 return 1;
3706 }
3707 tmp = tcg_const_i32(rd);
3708 tmp2 = tcg_const_i32(rm);
3709 if (q) {
3710 switch (size) {
3711 case 0:
02da0b2d 3712 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3713 break;
3714 case 1:
02da0b2d 3715 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3716 break;
3717 case 2:
02da0b2d 3718 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3719 break;
3720 default:
3721 abort();
3722 }
3723 } else {
3724 switch (size) {
3725 case 0:
02da0b2d 3726 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3727 break;
3728 case 1:
02da0b2d 3729 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3730 break;
3731 default:
3732 abort();
3733 }
3734 }
3735 tcg_temp_free_i32(tmp);
3736 tcg_temp_free_i32(tmp2);
3737 return 0;
19457615
FN
3738}
3739
d68a6f3a 3740static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3741{
39d5492a 3742 TCGv_i32 tmp, tmp2;
600b828c 3743 if (!q && size == 2) {
d68a6f3a
PM
3744 return 1;
3745 }
3746 tmp = tcg_const_i32(rd);
3747 tmp2 = tcg_const_i32(rm);
3748 if (q) {
3749 switch (size) {
3750 case 0:
02da0b2d 3751 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3752 break;
3753 case 1:
02da0b2d 3754 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3755 break;
3756 case 2:
02da0b2d 3757 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3758 break;
3759 default:
3760 abort();
3761 }
3762 } else {
3763 switch (size) {
3764 case 0:
02da0b2d 3765 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3766 break;
3767 case 1:
02da0b2d 3768 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3769 break;
3770 default:
3771 abort();
3772 }
3773 }
3774 tcg_temp_free_i32(tmp);
3775 tcg_temp_free_i32(tmp2);
3776 return 0;
19457615
FN
3777}
3778
/* VTRN with 8-bit elements: transpose the byte lanes of t0 and t1 in
 * place.  The odd bytes of t0 are swapped with the even bytes of t1.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (t0 bytes shifted into the odd positions) | (even bytes of t1) */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (t1 bytes shifted into the even positions) | (odd bytes of the
     * original t0); note t0 must still be intact here, so the final move
     * into t0 happens last.
     */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3800
/* VTRN with 16-bit elements: swap the high half of t0 with the low half
 * of t1, in place.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = low half of t0 in the high position | low half of t1 */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = high half of t1 in the low position | high half of original t0;
     * t0 is read here, so it is overwritten only afterwards.
     */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
3819
3820
9ee6e8bb
PB
/* Per-op (insn bits [11:8]) layout table for Neon VLDn/VSTn "multiple
 * structures" forms: how many D registers are accessed, how structure
 * elements are interleaved in memory, and the register-number spacing.
 * The table is only ever read, so it is const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3838
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved accesses restart from the base for each
             * structure member rather than walking linearly.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one doubleword access per register.  */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, IS_USER(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* 8/16/32-bit elements: build up each 32-bit register
                 * half ("pass") from individual element accesses.
                 */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two halfword accesses per 32-bit pass.  */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, IS_USER(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses per 32-bit pass.  */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, IS_USER(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the target lane,
                         * preserving the other lanes of the register.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise by register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4157
8f8e3aa4 4158/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4159static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4160{
4161 tcg_gen_and_i32(t, t, c);
f669df27 4162 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4163 tcg_gen_or_i32(dest, t, f);
4164}
4165
39d5492a 4166static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4167{
4168 switch (size) {
4169 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4170 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4171 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4172 default: abort();
4173 }
4174}
4175
39d5492a 4176static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4177{
4178 switch (size) {
02da0b2d
PM
4179 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4180 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4181 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4182 default: abort();
4183 }
4184}
4185
39d5492a 4186static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4187{
4188 switch (size) {
02da0b2d
PM
4189 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4190 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4191 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4192 default: abort();
4193 }
4194}
4195
39d5492a 4196static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4197{
4198 switch (size) {
02da0b2d
PM
4199 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4200 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4201 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4202 default: abort();
4203 }
4204}
4205
39d5492a 4206static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4207 int q, int u)
4208{
4209 if (q) {
4210 if (u) {
4211 switch (size) {
4212 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4213 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4214 default: abort();
4215 }
4216 } else {
4217 switch (size) {
4218 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4219 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4220 default: abort();
4221 }
4222 }
4223 } else {
4224 if (u) {
4225 switch (size) {
b408a9b0
CL
4226 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4227 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4228 default: abort();
4229 }
4230 } else {
4231 switch (size) {
4232 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4233 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4234 default: abort();
4235 }
4236 }
4237 }
4238}
4239
39d5492a 4240static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4241{
4242 if (u) {
4243 switch (size) {
4244 case 0: gen_helper_neon_widen_u8(dest, src); break;
4245 case 1: gen_helper_neon_widen_u16(dest, src); break;
4246 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4247 default: abort();
4248 }
4249 } else {
4250 switch (size) {
4251 case 0: gen_helper_neon_widen_s8(dest, src); break;
4252 case 1: gen_helper_neon_widen_s16(dest, src); break;
4253 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4254 default: abort();
4255 }
4256 }
7d1b0095 4257 tcg_temp_free_i32(src);
ad69471c
PB
4258}
4259
4260static inline void gen_neon_addl(int size)
4261{
4262 switch (size) {
4263 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4264 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4265 case 2: tcg_gen_add_i64(CPU_V001); break;
4266 default: abort();
4267 }
4268}
4269
4270static inline void gen_neon_subl(int size)
4271{
4272 switch (size) {
4273 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4274 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4275 case 2: tcg_gen_sub_i64(CPU_V001); break;
4276 default: abort();
4277 }
4278}
4279
a7812ae4 4280static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4281{
4282 switch (size) {
4283 case 0: gen_helper_neon_negl_u16(var, var); break;
4284 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4285 case 2:
4286 tcg_gen_neg_i64(var, var);
4287 break;
ad69471c
PB
4288 default: abort();
4289 }
4290}
4291
a7812ae4 4292static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4293{
4294 switch (size) {
02da0b2d
PM
4295 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4296 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4297 default: abort();
4298 }
4299}
4300
39d5492a
PM
4301static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4302 int size, int u)
ad69471c 4303{
a7812ae4 4304 TCGv_i64 tmp;
ad69471c
PB
4305
4306 switch ((size << 1) | u) {
4307 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4308 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4309 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4310 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4311 case 4:
4312 tmp = gen_muls_i64_i32(a, b);
4313 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4314 tcg_temp_free_i64(tmp);
ad69471c
PB
4315 break;
4316 case 5:
4317 tmp = gen_mulu_i64_i32(a, b);
4318 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4319 tcg_temp_free_i64(tmp);
ad69471c
PB
4320 break;
4321 default: abort();
4322 }
c6067f04
CL
4323
4324 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4325 Don't forget to clean them now. */
4326 if (size < 2) {
7d1b0095
PM
4327 tcg_temp_free_i32(a);
4328 tcg_temp_free_i32(b);
c6067f04 4329 }
ad69471c
PB
4330}
4331
39d5492a
PM
4332static void gen_neon_narrow_op(int op, int u, int size,
4333 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4334{
4335 if (op) {
4336 if (u) {
4337 gen_neon_unarrow_sats(size, dest, src);
4338 } else {
4339 gen_neon_narrow(size, dest, src);
4340 }
4341 } else {
4342 if (u) {
4343 gen_neon_narrow_satu(size, dest, src);
4344 } else {
4345 gen_neon_narrow_sats(size, dest, src);
4346 }
4347 }
4348}
4349
62698be3
PM
4350/* Symbolic constants for op fields for Neon 3-register same-length.
4351 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4352 * table A7-9.
4353 */
4354#define NEON_3R_VHADD 0
4355#define NEON_3R_VQADD 1
4356#define NEON_3R_VRHADD 2
4357#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4358#define NEON_3R_VHSUB 4
4359#define NEON_3R_VQSUB 5
4360#define NEON_3R_VCGT 6
4361#define NEON_3R_VCGE 7
4362#define NEON_3R_VSHL 8
4363#define NEON_3R_VQSHL 9
4364#define NEON_3R_VRSHL 10
4365#define NEON_3R_VQRSHL 11
4366#define NEON_3R_VMAX 12
4367#define NEON_3R_VMIN 13
4368#define NEON_3R_VABD 14
4369#define NEON_3R_VABA 15
4370#define NEON_3R_VADD_VSUB 16
4371#define NEON_3R_VTST_VCEQ 17
4372#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4373#define NEON_3R_VMUL 19
4374#define NEON_3R_VPMAX 20
4375#define NEON_3R_VPMIN 21
4376#define NEON_3R_VQDMULH_VQRDMULH 22
4377#define NEON_3R_VPADD 23
da97f52c 4378#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4379#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4380#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4381#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4382#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4383#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4384#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4385
4386static const uint8_t neon_3r_sizes[] = {
4387 [NEON_3R_VHADD] = 0x7,
4388 [NEON_3R_VQADD] = 0xf,
4389 [NEON_3R_VRHADD] = 0x7,
4390 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4391 [NEON_3R_VHSUB] = 0x7,
4392 [NEON_3R_VQSUB] = 0xf,
4393 [NEON_3R_VCGT] = 0x7,
4394 [NEON_3R_VCGE] = 0x7,
4395 [NEON_3R_VSHL] = 0xf,
4396 [NEON_3R_VQSHL] = 0xf,
4397 [NEON_3R_VRSHL] = 0xf,
4398 [NEON_3R_VQRSHL] = 0xf,
4399 [NEON_3R_VMAX] = 0x7,
4400 [NEON_3R_VMIN] = 0x7,
4401 [NEON_3R_VABD] = 0x7,
4402 [NEON_3R_VABA] = 0x7,
4403 [NEON_3R_VADD_VSUB] = 0xf,
4404 [NEON_3R_VTST_VCEQ] = 0x7,
4405 [NEON_3R_VML] = 0x7,
4406 [NEON_3R_VMUL] = 0x7,
4407 [NEON_3R_VPMAX] = 0x7,
4408 [NEON_3R_VPMIN] = 0x7,
4409 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4410 [NEON_3R_VPADD] = 0x7,
da97f52c 4411 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4412 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4413 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4414 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4415 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4416 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4417 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4418};
4419
600b828c
PM
4420/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4421 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4422 * table A7-13.
4423 */
4424#define NEON_2RM_VREV64 0
4425#define NEON_2RM_VREV32 1
4426#define NEON_2RM_VREV16 2
4427#define NEON_2RM_VPADDL 4
4428#define NEON_2RM_VPADDL_U 5
4429#define NEON_2RM_VCLS 8
4430#define NEON_2RM_VCLZ 9
4431#define NEON_2RM_VCNT 10
4432#define NEON_2RM_VMVN 11
4433#define NEON_2RM_VPADAL 12
4434#define NEON_2RM_VPADAL_U 13
4435#define NEON_2RM_VQABS 14
4436#define NEON_2RM_VQNEG 15
4437#define NEON_2RM_VCGT0 16
4438#define NEON_2RM_VCGE0 17
4439#define NEON_2RM_VCEQ0 18
4440#define NEON_2RM_VCLE0 19
4441#define NEON_2RM_VCLT0 20
4442#define NEON_2RM_VABS 22
4443#define NEON_2RM_VNEG 23
4444#define NEON_2RM_VCGT0_F 24
4445#define NEON_2RM_VCGE0_F 25
4446#define NEON_2RM_VCEQ0_F 26
4447#define NEON_2RM_VCLE0_F 27
4448#define NEON_2RM_VCLT0_F 28
4449#define NEON_2RM_VABS_F 30
4450#define NEON_2RM_VNEG_F 31
4451#define NEON_2RM_VSWP 32
4452#define NEON_2RM_VTRN 33
4453#define NEON_2RM_VUZP 34
4454#define NEON_2RM_VZIP 35
4455#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4456#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4457#define NEON_2RM_VSHLL 38
4458#define NEON_2RM_VCVT_F16_F32 44
4459#define NEON_2RM_VCVT_F32_F16 46
4460#define NEON_2RM_VRECPE 56
4461#define NEON_2RM_VRSQRTE 57
4462#define NEON_2RM_VRECPE_F 58
4463#define NEON_2RM_VRSQRTE_F 59
4464#define NEON_2RM_VCVT_FS 60
4465#define NEON_2RM_VCVT_FU 61
4466#define NEON_2RM_VCVT_SF 62
4467#define NEON_2RM_VCVT_UF 63
4468
4469static int neon_2rm_is_float_op(int op)
4470{
4471 /* Return true if this neon 2reg-misc op is float-to-float */
4472 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4473 op >= NEON_2RM_VRECPE_F);
4474}
4475
4476/* Each entry in this array has bit n set if the insn allows
4477 * size value n (otherwise it will UNDEF). Since unallocated
4478 * op values will have no bits set they always UNDEF.
4479 */
4480static const uint8_t neon_2rm_sizes[] = {
4481 [NEON_2RM_VREV64] = 0x7,
4482 [NEON_2RM_VREV32] = 0x3,
4483 [NEON_2RM_VREV16] = 0x1,
4484 [NEON_2RM_VPADDL] = 0x7,
4485 [NEON_2RM_VPADDL_U] = 0x7,
4486 [NEON_2RM_VCLS] = 0x7,
4487 [NEON_2RM_VCLZ] = 0x7,
4488 [NEON_2RM_VCNT] = 0x1,
4489 [NEON_2RM_VMVN] = 0x1,
4490 [NEON_2RM_VPADAL] = 0x7,
4491 [NEON_2RM_VPADAL_U] = 0x7,
4492 [NEON_2RM_VQABS] = 0x7,
4493 [NEON_2RM_VQNEG] = 0x7,
4494 [NEON_2RM_VCGT0] = 0x7,
4495 [NEON_2RM_VCGE0] = 0x7,
4496 [NEON_2RM_VCEQ0] = 0x7,
4497 [NEON_2RM_VCLE0] = 0x7,
4498 [NEON_2RM_VCLT0] = 0x7,
4499 [NEON_2RM_VABS] = 0x7,
4500 [NEON_2RM_VNEG] = 0x7,
4501 [NEON_2RM_VCGT0_F] = 0x4,
4502 [NEON_2RM_VCGE0_F] = 0x4,
4503 [NEON_2RM_VCEQ0_F] = 0x4,
4504 [NEON_2RM_VCLE0_F] = 0x4,
4505 [NEON_2RM_VCLT0_F] = 0x4,
4506 [NEON_2RM_VABS_F] = 0x4,
4507 [NEON_2RM_VNEG_F] = 0x4,
4508 [NEON_2RM_VSWP] = 0x1,
4509 [NEON_2RM_VTRN] = 0x7,
4510 [NEON_2RM_VUZP] = 0x7,
4511 [NEON_2RM_VZIP] = 0x7,
4512 [NEON_2RM_VMOVN] = 0x7,
4513 [NEON_2RM_VQMOVN] = 0x7,
4514 [NEON_2RM_VSHLL] = 0x7,
4515 [NEON_2RM_VCVT_F16_F32] = 0x2,
4516 [NEON_2RM_VCVT_F32_F16] = 0x2,
4517 [NEON_2RM_VRECPE] = 0x4,
4518 [NEON_2RM_VRSQRTE] = 0x4,
4519 [NEON_2RM_VRECPE_F] = 0x4,
4520 [NEON_2RM_VRSQRTE_F] = 0x4,
4521 [NEON_2RM_VCVT_FS] = 0x4,
4522 [NEON_2RM_VCVT_FU] = 0x4,
4523 [NEON_2RM_VCVT_SF] = 0x4,
4524 [NEON_2RM_VCVT_UF] = 0x4,
4525};
4526
9ee6e8bb
PB
4527/* Translate a NEON data processing instruction. Return nonzero if the
4528 instruction is invalid.
ad69471c
PB
4529 We process data in a mixture of 32-bit and 64-bit chunks.
4530 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4531
0ecb72a5 4532static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4533{
4534 int op;
4535 int q;
4536 int rd, rn, rm;
4537 int size;
4538 int shift;
4539 int pass;
4540 int count;
4541 int pairwise;
4542 int u;
ca9a32e4 4543 uint32_t imm, mask;
39d5492a 4544 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4545 TCGv_i64 tmp64;
9ee6e8bb 4546
5df8bac1 4547 if (!s->vfp_enabled)
9ee6e8bb
PB
4548 return 1;
4549 q = (insn & (1 << 6)) != 0;
4550 u = (insn >> 24) & 1;
4551 VFP_DREG_D(rd, insn);
4552 VFP_DREG_N(rn, insn);
4553 VFP_DREG_M(rm, insn);
4554 size = (insn >> 20) & 3;
4555 if ((insn & (1 << 23)) == 0) {
4556 /* Three register same length. */
4557 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4558 /* Catch invalid op and bad size combinations: UNDEF */
4559 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4560 return 1;
4561 }
25f84f79
PM
4562 /* All insns of this form UNDEF for either this condition or the
4563 * superset of cases "Q==1"; we catch the latter later.
4564 */
4565 if (q && ((rd | rn | rm) & 1)) {
4566 return 1;
4567 }
62698be3
PM
4568 if (size == 3 && op != NEON_3R_LOGIC) {
4569 /* 64-bit element instructions. */
9ee6e8bb 4570 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4571 neon_load_reg64(cpu_V0, rn + pass);
4572 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4573 switch (op) {
62698be3 4574 case NEON_3R_VQADD:
9ee6e8bb 4575 if (u) {
02da0b2d
PM
4576 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4577 cpu_V0, cpu_V1);
2c0262af 4578 } else {
02da0b2d
PM
4579 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4580 cpu_V0, cpu_V1);
2c0262af 4581 }
9ee6e8bb 4582 break;
62698be3 4583 case NEON_3R_VQSUB:
9ee6e8bb 4584 if (u) {
02da0b2d
PM
4585 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4586 cpu_V0, cpu_V1);
ad69471c 4587 } else {
02da0b2d
PM
4588 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4589 cpu_V0, cpu_V1);
ad69471c
PB
4590 }
4591 break;
62698be3 4592 case NEON_3R_VSHL:
ad69471c
PB
4593 if (u) {
4594 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4595 } else {
4596 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4597 }
4598 break;
62698be3 4599 case NEON_3R_VQSHL:
ad69471c 4600 if (u) {
02da0b2d
PM
4601 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4602 cpu_V1, cpu_V0);
ad69471c 4603 } else {
02da0b2d
PM
4604 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4605 cpu_V1, cpu_V0);
ad69471c
PB
4606 }
4607 break;
62698be3 4608 case NEON_3R_VRSHL:
ad69471c
PB
4609 if (u) {
4610 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4611 } else {
ad69471c
PB
4612 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4613 }
4614 break;
62698be3 4615 case NEON_3R_VQRSHL:
ad69471c 4616 if (u) {
02da0b2d
PM
4617 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4618 cpu_V1, cpu_V0);
ad69471c 4619 } else {
02da0b2d
PM
4620 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4621 cpu_V1, cpu_V0);
1e8d4eec 4622 }
9ee6e8bb 4623 break;
62698be3 4624 case NEON_3R_VADD_VSUB:
9ee6e8bb 4625 if (u) {
ad69471c 4626 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4627 } else {
ad69471c 4628 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4629 }
4630 break;
4631 default:
4632 abort();
2c0262af 4633 }
ad69471c 4634 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4635 }
9ee6e8bb 4636 return 0;
2c0262af 4637 }
25f84f79 4638 pairwise = 0;
9ee6e8bb 4639 switch (op) {
62698be3
PM
4640 case NEON_3R_VSHL:
4641 case NEON_3R_VQSHL:
4642 case NEON_3R_VRSHL:
4643 case NEON_3R_VQRSHL:
9ee6e8bb 4644 {
ad69471c
PB
4645 int rtmp;
4646 /* Shift instruction operands are reversed. */
4647 rtmp = rn;
9ee6e8bb 4648 rn = rm;
ad69471c 4649 rm = rtmp;
9ee6e8bb 4650 }
2c0262af 4651 break;
25f84f79
PM
4652 case NEON_3R_VPADD:
4653 if (u) {
4654 return 1;
4655 }
4656 /* Fall through */
62698be3
PM
4657 case NEON_3R_VPMAX:
4658 case NEON_3R_VPMIN:
9ee6e8bb 4659 pairwise = 1;
2c0262af 4660 break;
25f84f79
PM
4661 case NEON_3R_FLOAT_ARITH:
4662 pairwise = (u && size < 2); /* if VPADD (float) */
4663 break;
4664 case NEON_3R_FLOAT_MINMAX:
4665 pairwise = u; /* if VPMIN/VPMAX (float) */
4666 break;
4667 case NEON_3R_FLOAT_CMP:
4668 if (!u && size) {
4669 /* no encoding for U=0 C=1x */
4670 return 1;
4671 }
4672 break;
4673 case NEON_3R_FLOAT_ACMP:
4674 if (!u) {
4675 return 1;
4676 }
4677 break;
4678 case NEON_3R_VRECPS_VRSQRTS:
4679 if (u) {
4680 return 1;
4681 }
2c0262af 4682 break;
25f84f79
PM
4683 case NEON_3R_VMUL:
4684 if (u && (size != 0)) {
4685 /* UNDEF on invalid size for polynomial subcase */
4686 return 1;
4687 }
2c0262af 4688 break;
da97f52c
PM
4689 case NEON_3R_VFM:
4690 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4691 return 1;
4692 }
4693 break;
9ee6e8bb 4694 default:
2c0262af 4695 break;
9ee6e8bb 4696 }
dd8fbd78 4697
25f84f79
PM
4698 if (pairwise && q) {
4699 /* All the pairwise insns UNDEF if Q is set */
4700 return 1;
4701 }
4702
9ee6e8bb
PB
4703 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4704
4705 if (pairwise) {
4706 /* Pairwise. */
a5a14945
JR
4707 if (pass < 1) {
4708 tmp = neon_load_reg(rn, 0);
4709 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4710 } else {
a5a14945
JR
4711 tmp = neon_load_reg(rm, 0);
4712 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4713 }
4714 } else {
4715 /* Elementwise. */
dd8fbd78
FN
4716 tmp = neon_load_reg(rn, pass);
4717 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4718 }
4719 switch (op) {
62698be3 4720 case NEON_3R_VHADD:
9ee6e8bb
PB
4721 GEN_NEON_INTEGER_OP(hadd);
4722 break;
62698be3 4723 case NEON_3R_VQADD:
02da0b2d 4724 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4725 break;
62698be3 4726 case NEON_3R_VRHADD:
9ee6e8bb 4727 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4728 break;
62698be3 4729 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4730 switch ((u << 2) | size) {
4731 case 0: /* VAND */
dd8fbd78 4732 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4733 break;
4734 case 1: /* BIC */
f669df27 4735 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4736 break;
4737 case 2: /* VORR */
dd8fbd78 4738 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4739 break;
4740 case 3: /* VORN */
f669df27 4741 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4742 break;
4743 case 4: /* VEOR */
dd8fbd78 4744 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4745 break;
4746 case 5: /* VBSL */
dd8fbd78
FN
4747 tmp3 = neon_load_reg(rd, pass);
4748 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4749 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4750 break;
4751 case 6: /* VBIT */
dd8fbd78
FN
4752 tmp3 = neon_load_reg(rd, pass);
4753 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4754 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4755 break;
4756 case 7: /* VBIF */
dd8fbd78
FN
4757 tmp3 = neon_load_reg(rd, pass);
4758 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4759 tcg_temp_free_i32(tmp3);
9ee6e8bb 4760 break;
2c0262af
FB
4761 }
4762 break;
62698be3 4763 case NEON_3R_VHSUB:
9ee6e8bb
PB
4764 GEN_NEON_INTEGER_OP(hsub);
4765 break;
62698be3 4766 case NEON_3R_VQSUB:
02da0b2d 4767 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4768 break;
62698be3 4769 case NEON_3R_VCGT:
9ee6e8bb
PB
4770 GEN_NEON_INTEGER_OP(cgt);
4771 break;
62698be3 4772 case NEON_3R_VCGE:
9ee6e8bb
PB
4773 GEN_NEON_INTEGER_OP(cge);
4774 break;
62698be3 4775 case NEON_3R_VSHL:
ad69471c 4776 GEN_NEON_INTEGER_OP(shl);
2c0262af 4777 break;
62698be3 4778 case NEON_3R_VQSHL:
02da0b2d 4779 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4780 break;
62698be3 4781 case NEON_3R_VRSHL:
ad69471c 4782 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4783 break;
62698be3 4784 case NEON_3R_VQRSHL:
02da0b2d 4785 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4786 break;
62698be3 4787 case NEON_3R_VMAX:
9ee6e8bb
PB
4788 GEN_NEON_INTEGER_OP(max);
4789 break;
62698be3 4790 case NEON_3R_VMIN:
9ee6e8bb
PB
4791 GEN_NEON_INTEGER_OP(min);
4792 break;
62698be3 4793 case NEON_3R_VABD:
9ee6e8bb
PB
4794 GEN_NEON_INTEGER_OP(abd);
4795 break;
62698be3 4796 case NEON_3R_VABA:
9ee6e8bb 4797 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4798 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4799 tmp2 = neon_load_reg(rd, pass);
4800 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4801 break;
62698be3 4802 case NEON_3R_VADD_VSUB:
9ee6e8bb 4803 if (!u) { /* VADD */
62698be3 4804 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4805 } else { /* VSUB */
4806 switch (size) {
dd8fbd78
FN
4807 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4808 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4809 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4810 default: abort();
9ee6e8bb
PB
4811 }
4812 }
4813 break;
62698be3 4814 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4815 if (!u) { /* VTST */
4816 switch (size) {
dd8fbd78
FN
4817 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4818 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4819 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4820 default: abort();
9ee6e8bb
PB
4821 }
4822 } else { /* VCEQ */
4823 switch (size) {
dd8fbd78
FN
4824 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4825 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4826 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4827 default: abort();
9ee6e8bb
PB
4828 }
4829 }
4830 break;
62698be3 4831 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4832 switch (size) {
dd8fbd78
FN
4833 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4834 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4835 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4836 default: abort();
9ee6e8bb 4837 }
7d1b0095 4838 tcg_temp_free_i32(tmp2);
dd8fbd78 4839 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4840 if (u) { /* VMLS */
dd8fbd78 4841 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4842 } else { /* VMLA */
dd8fbd78 4843 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4844 }
4845 break;
62698be3 4846 case NEON_3R_VMUL:
9ee6e8bb 4847 if (u) { /* polynomial */
dd8fbd78 4848 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4849 } else { /* Integer */
4850 switch (size) {
dd8fbd78
FN
4851 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4852 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4853 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4854 default: abort();
9ee6e8bb
PB
4855 }
4856 }
4857 break;
62698be3 4858 case NEON_3R_VPMAX:
9ee6e8bb
PB
4859 GEN_NEON_INTEGER_OP(pmax);
4860 break;
62698be3 4861 case NEON_3R_VPMIN:
9ee6e8bb
PB
4862 GEN_NEON_INTEGER_OP(pmin);
4863 break;
62698be3 4864 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4865 if (!u) { /* VQDMULH */
4866 switch (size) {
02da0b2d
PM
4867 case 1:
4868 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4869 break;
4870 case 2:
4871 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4872 break;
62698be3 4873 default: abort();
9ee6e8bb 4874 }
62698be3 4875 } else { /* VQRDMULH */
9ee6e8bb 4876 switch (size) {
02da0b2d
PM
4877 case 1:
4878 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4879 break;
4880 case 2:
4881 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4882 break;
62698be3 4883 default: abort();
9ee6e8bb
PB
4884 }
4885 }
4886 break;
62698be3 4887 case NEON_3R_VPADD:
9ee6e8bb 4888 switch (size) {
dd8fbd78
FN
4889 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4890 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4891 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4892 default: abort();
9ee6e8bb
PB
4893 }
4894 break;
62698be3 4895 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4896 {
4897 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4898 switch ((u << 2) | size) {
4899 case 0: /* VADD */
aa47cfdd
PM
4900 case 4: /* VPADD */
4901 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4902 break;
4903 case 2: /* VSUB */
aa47cfdd 4904 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4905 break;
4906 case 6: /* VABD */
aa47cfdd 4907 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4908 break;
4909 default:
62698be3 4910 abort();
9ee6e8bb 4911 }
aa47cfdd 4912 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4913 break;
aa47cfdd 4914 }
62698be3 4915 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4916 {
4917 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4918 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4919 if (!u) {
7d1b0095 4920 tcg_temp_free_i32(tmp2);
dd8fbd78 4921 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4922 if (size == 0) {
aa47cfdd 4923 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4924 } else {
aa47cfdd 4925 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4926 }
4927 }
aa47cfdd 4928 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4929 break;
aa47cfdd 4930 }
62698be3 4931 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4932 {
4933 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4934 if (!u) {
aa47cfdd 4935 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4936 } else {
aa47cfdd
PM
4937 if (size == 0) {
4938 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4939 } else {
4940 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4941 }
b5ff1b31 4942 }
aa47cfdd 4943 tcg_temp_free_ptr(fpstatus);
2c0262af 4944 break;
aa47cfdd 4945 }
62698be3 4946 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4947 {
4948 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4949 if (size == 0) {
4950 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4951 } else {
4952 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4953 }
4954 tcg_temp_free_ptr(fpstatus);
2c0262af 4955 break;
aa47cfdd 4956 }
62698be3 4957 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4958 {
4959 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4960 if (size == 0) {
4961 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4962 } else {
4963 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4964 }
4965 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4966 break;
aa47cfdd 4967 }
62698be3 4968 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4969 if (size == 0)
dd8fbd78 4970 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4971 else
dd8fbd78 4972 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4973 break;
da97f52c
PM
4974 case NEON_3R_VFM:
4975 {
4976 /* VFMA, VFMS: fused multiply-add */
4977 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4978 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4979 if (size) {
4980 /* VFMS */
4981 gen_helper_vfp_negs(tmp, tmp);
4982 }
4983 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4984 tcg_temp_free_i32(tmp3);
4985 tcg_temp_free_ptr(fpstatus);
4986 break;
4987 }
9ee6e8bb
PB
4988 default:
4989 abort();
2c0262af 4990 }
7d1b0095 4991 tcg_temp_free_i32(tmp2);
dd8fbd78 4992
9ee6e8bb
PB
4993 /* Save the result. For elementwise operations we can put it
4994 straight into the destination register. For pairwise operations
4995 we have to be careful to avoid clobbering the source operands. */
4996 if (pairwise && rd == rm) {
dd8fbd78 4997 neon_store_scratch(pass, tmp);
9ee6e8bb 4998 } else {
dd8fbd78 4999 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5000 }
5001
5002 } /* for pass */
5003 if (pairwise && rd == rm) {
5004 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5005 tmp = neon_load_scratch(pass);
5006 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5007 }
5008 }
ad69471c 5009 /* End of 3 register same size operations. */
9ee6e8bb
PB
5010 } else if (insn & (1 << 4)) {
5011 if ((insn & 0x00380080) != 0) {
5012 /* Two registers and shift. */
5013 op = (insn >> 8) & 0xf;
5014 if (insn & (1 << 7)) {
cc13115b
PM
5015 /* 64-bit shift. */
5016 if (op > 7) {
5017 return 1;
5018 }
9ee6e8bb
PB
5019 size = 3;
5020 } else {
5021 size = 2;
5022 while ((insn & (1 << (size + 19))) == 0)
5023 size--;
5024 }
5025 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5026 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5027 by immediate using the variable shift operations. */
5028 if (op < 8) {
5029 /* Shift by immediate:
5030 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5031 if (q && ((rd | rm) & 1)) {
5032 return 1;
5033 }
5034 if (!u && (op == 4 || op == 6)) {
5035 return 1;
5036 }
9ee6e8bb
PB
5037 /* Right shifts are encoded as N - shift, where N is the
5038 element size in bits. */
5039 if (op <= 4)
5040 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5041 if (size == 3) {
5042 count = q + 1;
5043 } else {
5044 count = q ? 4: 2;
5045 }
5046 switch (size) {
5047 case 0:
5048 imm = (uint8_t) shift;
5049 imm |= imm << 8;
5050 imm |= imm << 16;
5051 break;
5052 case 1:
5053 imm = (uint16_t) shift;
5054 imm |= imm << 16;
5055 break;
5056 case 2:
5057 case 3:
5058 imm = shift;
5059 break;
5060 default:
5061 abort();
5062 }
5063
5064 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5065 if (size == 3) {
5066 neon_load_reg64(cpu_V0, rm + pass);
5067 tcg_gen_movi_i64(cpu_V1, imm);
5068 switch (op) {
5069 case 0: /* VSHR */
5070 case 1: /* VSRA */
5071 if (u)
5072 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5073 else
ad69471c 5074 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5075 break;
ad69471c
PB
5076 case 2: /* VRSHR */
5077 case 3: /* VRSRA */
5078 if (u)
5079 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5080 else
ad69471c 5081 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5082 break;
ad69471c 5083 case 4: /* VSRI */
ad69471c
PB
5084 case 5: /* VSHL, VSLI */
5085 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5086 break;
0322b26e 5087 case 6: /* VQSHLU */
02da0b2d
PM
5088 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5089 cpu_V0, cpu_V1);
ad69471c 5090 break;
0322b26e
PM
5091 case 7: /* VQSHL */
5092 if (u) {
02da0b2d 5093 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5094 cpu_V0, cpu_V1);
5095 } else {
02da0b2d 5096 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5097 cpu_V0, cpu_V1);
5098 }
9ee6e8bb 5099 break;
9ee6e8bb 5100 }
ad69471c
PB
5101 if (op == 1 || op == 3) {
5102 /* Accumulate. */
5371cb81 5103 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5104 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5105 } else if (op == 4 || (op == 5 && u)) {
5106 /* Insert */
923e6509
CL
5107 neon_load_reg64(cpu_V1, rd + pass);
5108 uint64_t mask;
5109 if (shift < -63 || shift > 63) {
5110 mask = 0;
5111 } else {
5112 if (op == 4) {
5113 mask = 0xffffffffffffffffull >> -shift;
5114 } else {
5115 mask = 0xffffffffffffffffull << shift;
5116 }
5117 }
5118 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5119 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5120 }
5121 neon_store_reg64(cpu_V0, rd + pass);
5122 } else { /* size < 3 */
5123 /* Operands in T0 and T1. */
dd8fbd78 5124 tmp = neon_load_reg(rm, pass);
7d1b0095 5125 tmp2 = tcg_temp_new_i32();
dd8fbd78 5126 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5127 switch (op) {
5128 case 0: /* VSHR */
5129 case 1: /* VSRA */
5130 GEN_NEON_INTEGER_OP(shl);
5131 break;
5132 case 2: /* VRSHR */
5133 case 3: /* VRSRA */
5134 GEN_NEON_INTEGER_OP(rshl);
5135 break;
5136 case 4: /* VSRI */
ad69471c
PB
5137 case 5: /* VSHL, VSLI */
5138 switch (size) {
dd8fbd78
FN
5139 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5140 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5141 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5142 default: abort();
ad69471c
PB
5143 }
5144 break;
0322b26e 5145 case 6: /* VQSHLU */
ad69471c 5146 switch (size) {
0322b26e 5147 case 0:
02da0b2d
PM
5148 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5149 tmp, tmp2);
0322b26e
PM
5150 break;
5151 case 1:
02da0b2d
PM
5152 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5153 tmp, tmp2);
0322b26e
PM
5154 break;
5155 case 2:
02da0b2d
PM
5156 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5157 tmp, tmp2);
0322b26e
PM
5158 break;
5159 default:
cc13115b 5160 abort();
ad69471c
PB
5161 }
5162 break;
0322b26e 5163 case 7: /* VQSHL */
02da0b2d 5164 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5165 break;
ad69471c 5166 }
7d1b0095 5167 tcg_temp_free_i32(tmp2);
ad69471c
PB
5168
5169 if (op == 1 || op == 3) {
5170 /* Accumulate. */
dd8fbd78 5171 tmp2 = neon_load_reg(rd, pass);
5371cb81 5172 gen_neon_add(size, tmp, tmp2);
7d1b0095 5173 tcg_temp_free_i32(tmp2);
ad69471c
PB
5174 } else if (op == 4 || (op == 5 && u)) {
5175 /* Insert */
5176 switch (size) {
5177 case 0:
5178 if (op == 4)
ca9a32e4 5179 mask = 0xff >> -shift;
ad69471c 5180 else
ca9a32e4
JR
5181 mask = (uint8_t)(0xff << shift);
5182 mask |= mask << 8;
5183 mask |= mask << 16;
ad69471c
PB
5184 break;
5185 case 1:
5186 if (op == 4)
ca9a32e4 5187 mask = 0xffff >> -shift;
ad69471c 5188 else
ca9a32e4
JR
5189 mask = (uint16_t)(0xffff << shift);
5190 mask |= mask << 16;
ad69471c
PB
5191 break;
5192 case 2:
ca9a32e4
JR
5193 if (shift < -31 || shift > 31) {
5194 mask = 0;
5195 } else {
5196 if (op == 4)
5197 mask = 0xffffffffu >> -shift;
5198 else
5199 mask = 0xffffffffu << shift;
5200 }
ad69471c
PB
5201 break;
5202 default:
5203 abort();
5204 }
dd8fbd78 5205 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5206 tcg_gen_andi_i32(tmp, tmp, mask);
5207 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5208 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5209 tcg_temp_free_i32(tmp2);
ad69471c 5210 }
dd8fbd78 5211 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5212 }
5213 } /* for pass */
5214 } else if (op < 10) {
ad69471c 5215 /* Shift by immediate and narrow:
9ee6e8bb 5216 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5217 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5218 if (rm & 1) {
5219 return 1;
5220 }
9ee6e8bb
PB
5221 shift = shift - (1 << (size + 3));
5222 size++;
92cdfaeb 5223 if (size == 3) {
a7812ae4 5224 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5225 neon_load_reg64(cpu_V0, rm);
5226 neon_load_reg64(cpu_V1, rm + 1);
5227 for (pass = 0; pass < 2; pass++) {
5228 TCGv_i64 in;
5229 if (pass == 0) {
5230 in = cpu_V0;
5231 } else {
5232 in = cpu_V1;
5233 }
ad69471c 5234 if (q) {
0b36f4cd 5235 if (input_unsigned) {
92cdfaeb 5236 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5237 } else {
92cdfaeb 5238 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5239 }
ad69471c 5240 } else {
0b36f4cd 5241 if (input_unsigned) {
92cdfaeb 5242 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5243 } else {
92cdfaeb 5244 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5245 }
ad69471c 5246 }
7d1b0095 5247 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5248 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5249 neon_store_reg(rd, pass, tmp);
5250 } /* for pass */
5251 tcg_temp_free_i64(tmp64);
5252 } else {
5253 if (size == 1) {
5254 imm = (uint16_t)shift;
5255 imm |= imm << 16;
2c0262af 5256 } else {
92cdfaeb
PM
5257 /* size == 2 */
5258 imm = (uint32_t)shift;
5259 }
5260 tmp2 = tcg_const_i32(imm);
5261 tmp4 = neon_load_reg(rm + 1, 0);
5262 tmp5 = neon_load_reg(rm + 1, 1);
5263 for (pass = 0; pass < 2; pass++) {
5264 if (pass == 0) {
5265 tmp = neon_load_reg(rm, 0);
5266 } else {
5267 tmp = tmp4;
5268 }
0b36f4cd
CL
5269 gen_neon_shift_narrow(size, tmp, tmp2, q,
5270 input_unsigned);
92cdfaeb
PM
5271 if (pass == 0) {
5272 tmp3 = neon_load_reg(rm, 1);
5273 } else {
5274 tmp3 = tmp5;
5275 }
0b36f4cd
CL
5276 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5277 input_unsigned);
36aa55dc 5278 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5279 tcg_temp_free_i32(tmp);
5280 tcg_temp_free_i32(tmp3);
5281 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5282 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5283 neon_store_reg(rd, pass, tmp);
5284 } /* for pass */
c6067f04 5285 tcg_temp_free_i32(tmp2);
b75263d6 5286 }
9ee6e8bb 5287 } else if (op == 10) {
cc13115b
PM
5288 /* VSHLL, VMOVL */
5289 if (q || (rd & 1)) {
9ee6e8bb 5290 return 1;
cc13115b 5291 }
ad69471c
PB
5292 tmp = neon_load_reg(rm, 0);
5293 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5294 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5295 if (pass == 1)
5296 tmp = tmp2;
5297
5298 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5299
9ee6e8bb
PB
5300 if (shift != 0) {
5301 /* The shift is less than the width of the source
ad69471c
PB
5302 type, so we can just shift the whole register. */
5303 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5304 /* Widen the result of shift: we need to clear
5305 * the potential overflow bits resulting from
5306 * left bits of the narrow input appearing as
5307 * right bits of left the neighbour narrow
5308 * input. */
ad69471c
PB
5309 if (size < 2 || !u) {
5310 uint64_t imm64;
5311 if (size == 0) {
5312 imm = (0xffu >> (8 - shift));
5313 imm |= imm << 16;
acdf01ef 5314 } else if (size == 1) {
ad69471c 5315 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5316 } else {
5317 /* size == 2 */
5318 imm = 0xffffffff >> (32 - shift);
5319 }
5320 if (size < 2) {
5321 imm64 = imm | (((uint64_t)imm) << 32);
5322 } else {
5323 imm64 = imm;
9ee6e8bb 5324 }
acdf01ef 5325 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5326 }
5327 }
ad69471c 5328 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5329 }
f73534a5 5330 } else if (op >= 14) {
9ee6e8bb 5331 /* VCVT fixed-point. */
cc13115b
PM
5332 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5333 return 1;
5334 }
f73534a5
PM
5335 /* We have already masked out the must-be-1 top bit of imm6,
5336 * hence this 32-shift where the ARM ARM has 64-imm6.
5337 */
5338 shift = 32 - shift;
9ee6e8bb 5339 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5340 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5341 if (!(op & 1)) {
9ee6e8bb 5342 if (u)
5500b06c 5343 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5344 else
5500b06c 5345 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5346 } else {
5347 if (u)
5500b06c 5348 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5349 else
5500b06c 5350 gen_vfp_tosl(0, shift, 1);
2c0262af 5351 }
4373f3ce 5352 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5353 }
5354 } else {
9ee6e8bb
PB
5355 return 1;
5356 }
5357 } else { /* (insn & 0x00380080) == 0 */
5358 int invert;
7d80fee5
PM
5359 if (q && (rd & 1)) {
5360 return 1;
5361 }
9ee6e8bb
PB
5362
5363 op = (insn >> 8) & 0xf;
5364 /* One register and immediate. */
5365 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5366 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5367 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5368 * We choose to not special-case this and will behave as if a
5369 * valid constant encoding of 0 had been given.
5370 */
9ee6e8bb
PB
5371 switch (op) {
5372 case 0: case 1:
5373 /* no-op */
5374 break;
5375 case 2: case 3:
5376 imm <<= 8;
5377 break;
5378 case 4: case 5:
5379 imm <<= 16;
5380 break;
5381 case 6: case 7:
5382 imm <<= 24;
5383 break;
5384 case 8: case 9:
5385 imm |= imm << 16;
5386 break;
5387 case 10: case 11:
5388 imm = (imm << 8) | (imm << 24);
5389 break;
5390 case 12:
8e31209e 5391 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5392 break;
5393 case 13:
5394 imm = (imm << 16) | 0xffff;
5395 break;
5396 case 14:
5397 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5398 if (invert)
5399 imm = ~imm;
5400 break;
5401 case 15:
7d80fee5
PM
5402 if (invert) {
5403 return 1;
5404 }
9ee6e8bb
PB
5405 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5406 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5407 break;
5408 }
5409 if (invert)
5410 imm = ~imm;
5411
9ee6e8bb
PB
5412 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5413 if (op & 1 && op < 12) {
ad69471c 5414 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5415 if (invert) {
5416 /* The immediate value has already been inverted, so
5417 BIC becomes AND. */
ad69471c 5418 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5419 } else {
ad69471c 5420 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5421 }
9ee6e8bb 5422 } else {
ad69471c 5423 /* VMOV, VMVN. */
7d1b0095 5424 tmp = tcg_temp_new_i32();
9ee6e8bb 5425 if (op == 14 && invert) {
a5a14945 5426 int n;
ad69471c
PB
5427 uint32_t val;
5428 val = 0;
9ee6e8bb
PB
5429 for (n = 0; n < 4; n++) {
5430 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5431 val |= 0xff << (n * 8);
9ee6e8bb 5432 }
ad69471c
PB
5433 tcg_gen_movi_i32(tmp, val);
5434 } else {
5435 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5436 }
9ee6e8bb 5437 }
ad69471c 5438 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5439 }
5440 }
e4b3861d 5441 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5442 if (size != 3) {
5443 op = (insn >> 8) & 0xf;
5444 if ((insn & (1 << 6)) == 0) {
5445 /* Three registers of different lengths. */
5446 int src1_wide;
5447 int src2_wide;
5448 int prewiden;
695272dc
PM
5449 /* undefreq: bit 0 : UNDEF if size != 0
5450 * bit 1 : UNDEF if size == 0
5451 * bit 2 : UNDEF if U == 1
5452 * Note that [1:0] set implies 'always UNDEF'
5453 */
5454 int undefreq;
5455 /* prewiden, src1_wide, src2_wide, undefreq */
5456 static const int neon_3reg_wide[16][4] = {
5457 {1, 0, 0, 0}, /* VADDL */
5458 {1, 1, 0, 0}, /* VADDW */
5459 {1, 0, 0, 0}, /* VSUBL */
5460 {1, 1, 0, 0}, /* VSUBW */
5461 {0, 1, 1, 0}, /* VADDHN */
5462 {0, 0, 0, 0}, /* VABAL */
5463 {0, 1, 1, 0}, /* VSUBHN */
5464 {0, 0, 0, 0}, /* VABDL */
5465 {0, 0, 0, 0}, /* VMLAL */
5466 {0, 0, 0, 6}, /* VQDMLAL */
5467 {0, 0, 0, 0}, /* VMLSL */
5468 {0, 0, 0, 6}, /* VQDMLSL */
5469 {0, 0, 0, 0}, /* Integer VMULL */
5470 {0, 0, 0, 2}, /* VQDMULL */
5471 {0, 0, 0, 5}, /* Polynomial VMULL */
5472 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5473 };
5474
5475 prewiden = neon_3reg_wide[op][0];
5476 src1_wide = neon_3reg_wide[op][1];
5477 src2_wide = neon_3reg_wide[op][2];
695272dc 5478 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5479
695272dc
PM
5480 if (((undefreq & 1) && (size != 0)) ||
5481 ((undefreq & 2) && (size == 0)) ||
5482 ((undefreq & 4) && u)) {
5483 return 1;
5484 }
5485 if ((src1_wide && (rn & 1)) ||
5486 (src2_wide && (rm & 1)) ||
5487 (!src2_wide && (rd & 1))) {
ad69471c 5488 return 1;
695272dc 5489 }
ad69471c 5490
9ee6e8bb
PB
5491 /* Avoid overlapping operands. Wide source operands are
5492 always aligned so will never overlap with wide
5493 destinations in problematic ways. */
8f8e3aa4 5494 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5495 tmp = neon_load_reg(rm, 1);
5496 neon_store_scratch(2, tmp);
8f8e3aa4 5497 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5498 tmp = neon_load_reg(rn, 1);
5499 neon_store_scratch(2, tmp);
9ee6e8bb 5500 }
39d5492a 5501 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5502 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5503 if (src1_wide) {
5504 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5505 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5506 } else {
ad69471c 5507 if (pass == 1 && rd == rn) {
dd8fbd78 5508 tmp = neon_load_scratch(2);
9ee6e8bb 5509 } else {
ad69471c
PB
5510 tmp = neon_load_reg(rn, pass);
5511 }
5512 if (prewiden) {
5513 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5514 }
5515 }
ad69471c
PB
5516 if (src2_wide) {
5517 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5518 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5519 } else {
ad69471c 5520 if (pass == 1 && rd == rm) {
dd8fbd78 5521 tmp2 = neon_load_scratch(2);
9ee6e8bb 5522 } else {
ad69471c
PB
5523 tmp2 = neon_load_reg(rm, pass);
5524 }
5525 if (prewiden) {
5526 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5527 }
9ee6e8bb
PB
5528 }
5529 switch (op) {
5530 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5531 gen_neon_addl(size);
9ee6e8bb 5532 break;
79b0e534 5533 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5534 gen_neon_subl(size);
9ee6e8bb
PB
5535 break;
5536 case 5: case 7: /* VABAL, VABDL */
5537 switch ((size << 1) | u) {
ad69471c
PB
5538 case 0:
5539 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5540 break;
5541 case 1:
5542 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5543 break;
5544 case 2:
5545 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5546 break;
5547 case 3:
5548 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5549 break;
5550 case 4:
5551 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5552 break;
5553 case 5:
5554 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5555 break;
9ee6e8bb
PB
5556 default: abort();
5557 }
7d1b0095
PM
5558 tcg_temp_free_i32(tmp2);
5559 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5560 break;
5561 case 8: case 9: case 10: case 11: case 12: case 13:
5562 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5563 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5564 break;
5565 case 14: /* Polynomial VMULL */
e5ca24cb 5566 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5567 tcg_temp_free_i32(tmp2);
5568 tcg_temp_free_i32(tmp);
e5ca24cb 5569 break;
695272dc
PM
5570 default: /* 15 is RESERVED: caught earlier */
5571 abort();
9ee6e8bb 5572 }
ebcd88ce
PM
5573 if (op == 13) {
5574 /* VQDMULL */
5575 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5576 neon_store_reg64(cpu_V0, rd + pass);
5577 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5578 /* Accumulate. */
ebcd88ce 5579 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5580 switch (op) {
4dc064e6
PM
5581 case 10: /* VMLSL */
5582 gen_neon_negl(cpu_V0, size);
5583 /* Fall through */
5584 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5585 gen_neon_addl(size);
9ee6e8bb
PB
5586 break;
5587 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5588 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5589 if (op == 11) {
5590 gen_neon_negl(cpu_V0, size);
5591 }
ad69471c
PB
5592 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5593 break;
9ee6e8bb
PB
5594 default:
5595 abort();
5596 }
ad69471c 5597 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5598 } else if (op == 4 || op == 6) {
5599 /* Narrowing operation. */
7d1b0095 5600 tmp = tcg_temp_new_i32();
79b0e534 5601 if (!u) {
9ee6e8bb 5602 switch (size) {
ad69471c
PB
5603 case 0:
5604 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5605 break;
5606 case 1:
5607 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5608 break;
5609 case 2:
5610 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5611 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5612 break;
9ee6e8bb
PB
5613 default: abort();
5614 }
5615 } else {
5616 switch (size) {
ad69471c
PB
5617 case 0:
5618 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5619 break;
5620 case 1:
5621 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5622 break;
5623 case 2:
5624 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5625 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5626 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5627 break;
9ee6e8bb
PB
5628 default: abort();
5629 }
5630 }
ad69471c
PB
5631 if (pass == 0) {
5632 tmp3 = tmp;
5633 } else {
5634 neon_store_reg(rd, 0, tmp3);
5635 neon_store_reg(rd, 1, tmp);
5636 }
9ee6e8bb
PB
5637 } else {
5638 /* Write back the result. */
ad69471c 5639 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5640 }
5641 }
5642 } else {
3e3326df
PM
5643 /* Two registers and a scalar. NB that for ops of this form
5644 * the ARM ARM labels bit 24 as Q, but it is in our variable
5645 * 'u', not 'q'.
5646 */
5647 if (size == 0) {
5648 return 1;
5649 }
9ee6e8bb 5650 switch (op) {
9ee6e8bb 5651 case 1: /* Float VMLA scalar */
9ee6e8bb 5652 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5653 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5654 if (size == 1) {
5655 return 1;
5656 }
5657 /* fall through */
5658 case 0: /* Integer VMLA scalar */
5659 case 4: /* Integer VMLS scalar */
5660 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5661 case 12: /* VQDMULH scalar */
5662 case 13: /* VQRDMULH scalar */
3e3326df
PM
5663 if (u && ((rd | rn) & 1)) {
5664 return 1;
5665 }
dd8fbd78
FN
5666 tmp = neon_get_scalar(size, rm);
5667 neon_store_scratch(0, tmp);
9ee6e8bb 5668 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5669 tmp = neon_load_scratch(0);
5670 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5671 if (op == 12) {
5672 if (size == 1) {
02da0b2d 5673 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5674 } else {
02da0b2d 5675 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5676 }
5677 } else if (op == 13) {
5678 if (size == 1) {
02da0b2d 5679 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5680 } else {
02da0b2d 5681 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5682 }
5683 } else if (op & 1) {
aa47cfdd
PM
5684 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5685 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5686 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5687 } else {
5688 switch (size) {
dd8fbd78
FN
5689 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5690 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5691 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5692 default: abort();
9ee6e8bb
PB
5693 }
5694 }
7d1b0095 5695 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5696 if (op < 8) {
5697 /* Accumulate. */
dd8fbd78 5698 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5699 switch (op) {
5700 case 0:
dd8fbd78 5701 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5702 break;
5703 case 1:
aa47cfdd
PM
5704 {
5705 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5706 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5707 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5708 break;
aa47cfdd 5709 }
9ee6e8bb 5710 case 4:
dd8fbd78 5711 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5712 break;
5713 case 5:
aa47cfdd
PM
5714 {
5715 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5716 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5717 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5718 break;
aa47cfdd 5719 }
9ee6e8bb
PB
5720 default:
5721 abort();
5722 }
7d1b0095 5723 tcg_temp_free_i32(tmp2);
9ee6e8bb 5724 }
dd8fbd78 5725 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5726 }
5727 break;
9ee6e8bb 5728 case 3: /* VQDMLAL scalar */
9ee6e8bb 5729 case 7: /* VQDMLSL scalar */
9ee6e8bb 5730 case 11: /* VQDMULL scalar */
3e3326df 5731 if (u == 1) {
ad69471c 5732 return 1;
3e3326df
PM
5733 }
5734 /* fall through */
5735 case 2: /* VMLAL sclar */
5736 case 6: /* VMLSL scalar */
5737 case 10: /* VMULL scalar */
5738 if (rd & 1) {
5739 return 1;
5740 }
dd8fbd78 5741 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5742 /* We need a copy of tmp2 because gen_neon_mull
5743 * deletes it during pass 0. */
7d1b0095 5744 tmp4 = tcg_temp_new_i32();
c6067f04 5745 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5746 tmp3 = neon_load_reg(rn, 1);
ad69471c 5747
9ee6e8bb 5748 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5749 if (pass == 0) {
5750 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5751 } else {
dd8fbd78 5752 tmp = tmp3;
c6067f04 5753 tmp2 = tmp4;
9ee6e8bb 5754 }
ad69471c 5755 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5756 if (op != 11) {
5757 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5758 }
9ee6e8bb 5759 switch (op) {
4dc064e6
PM
5760 case 6:
5761 gen_neon_negl(cpu_V0, size);
5762 /* Fall through */
5763 case 2:
ad69471c 5764 gen_neon_addl(size);
9ee6e8bb
PB
5765 break;
5766 case 3: case 7:
ad69471c 5767 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5768 if (op == 7) {
5769 gen_neon_negl(cpu_V0, size);
5770 }
ad69471c 5771 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5772 break;
5773 case 10:
5774 /* no-op */
5775 break;
5776 case 11:
ad69471c 5777 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5778 break;
5779 default:
5780 abort();
5781 }
ad69471c 5782 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5783 }
dd8fbd78 5784
dd8fbd78 5785
9ee6e8bb
PB
5786 break;
5787 default: /* 14 and 15 are RESERVED */
5788 return 1;
5789 }
5790 }
5791 } else { /* size == 3 */
5792 if (!u) {
5793 /* Extract. */
9ee6e8bb 5794 imm = (insn >> 8) & 0xf;
ad69471c
PB
5795
5796 if (imm > 7 && !q)
5797 return 1;
5798
52579ea1
PM
5799 if (q && ((rd | rn | rm) & 1)) {
5800 return 1;
5801 }
5802
ad69471c
PB
5803 if (imm == 0) {
5804 neon_load_reg64(cpu_V0, rn);
5805 if (q) {
5806 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5807 }
ad69471c
PB
5808 } else if (imm == 8) {
5809 neon_load_reg64(cpu_V0, rn + 1);
5810 if (q) {
5811 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5812 }
ad69471c 5813 } else if (q) {
a7812ae4 5814 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5815 if (imm < 8) {
5816 neon_load_reg64(cpu_V0, rn);
a7812ae4 5817 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5818 } else {
5819 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5820 neon_load_reg64(tmp64, rm);
ad69471c
PB
5821 }
5822 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5823 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5824 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5825 if (imm < 8) {
5826 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5827 } else {
ad69471c
PB
5828 neon_load_reg64(cpu_V1, rm + 1);
5829 imm -= 8;
9ee6e8bb 5830 }
ad69471c 5831 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5832 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5833 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5834 tcg_temp_free_i64(tmp64);
ad69471c 5835 } else {
a7812ae4 5836 /* BUGFIX */
ad69471c 5837 neon_load_reg64(cpu_V0, rn);
a7812ae4 5838 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5839 neon_load_reg64(cpu_V1, rm);
a7812ae4 5840 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5841 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5842 }
5843 neon_store_reg64(cpu_V0, rd);
5844 if (q) {
5845 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5846 }
5847 } else if ((insn & (1 << 11)) == 0) {
5848 /* Two register misc. */
5849 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5850 size = (insn >> 18) & 3;
600b828c
PM
5851 /* UNDEF for unknown op values and bad op-size combinations */
5852 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5853 return 1;
5854 }
fc2a9b37
PM
5855 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5856 q && ((rm | rd) & 1)) {
5857 return 1;
5858 }
9ee6e8bb 5859 switch (op) {
600b828c 5860 case NEON_2RM_VREV64:
9ee6e8bb 5861 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5862 tmp = neon_load_reg(rm, pass * 2);
5863 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5864 switch (size) {
dd8fbd78
FN
5865 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5866 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5867 case 2: /* no-op */ break;
5868 default: abort();
5869 }
dd8fbd78 5870 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5871 if (size == 2) {
dd8fbd78 5872 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5873 } else {
9ee6e8bb 5874 switch (size) {
dd8fbd78
FN
5875 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5876 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5877 default: abort();
5878 }
dd8fbd78 5879 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5880 }
5881 }
5882 break;
600b828c
PM
5883 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5884 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5885 for (pass = 0; pass < q + 1; pass++) {
5886 tmp = neon_load_reg(rm, pass * 2);
5887 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5888 tmp = neon_load_reg(rm, pass * 2 + 1);
5889 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5890 switch (size) {
5891 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5892 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5893 case 2: tcg_gen_add_i64(CPU_V001); break;
5894 default: abort();
5895 }
600b828c 5896 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5897 /* Accumulate. */
ad69471c
PB
5898 neon_load_reg64(cpu_V1, rd + pass);
5899 gen_neon_addl(size);
9ee6e8bb 5900 }
ad69471c 5901 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5902 }
5903 break;
600b828c 5904 case NEON_2RM_VTRN:
9ee6e8bb 5905 if (size == 2) {
a5a14945 5906 int n;
9ee6e8bb 5907 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5908 tmp = neon_load_reg(rm, n);
5909 tmp2 = neon_load_reg(rd, n + 1);
5910 neon_store_reg(rm, n, tmp2);
5911 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5912 }
5913 } else {
5914 goto elementwise;
5915 }
5916 break;
600b828c 5917 case NEON_2RM_VUZP:
02acedf9 5918 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5919 return 1;
9ee6e8bb
PB
5920 }
5921 break;
600b828c 5922 case NEON_2RM_VZIP:
d68a6f3a 5923 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5924 return 1;
9ee6e8bb
PB
5925 }
5926 break;
600b828c
PM
5927 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5928 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5929 if (rm & 1) {
5930 return 1;
5931 }
39d5492a 5932 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5933 for (pass = 0; pass < 2; pass++) {
ad69471c 5934 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5935 tmp = tcg_temp_new_i32();
600b828c
PM
5936 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5937 tmp, cpu_V0);
ad69471c
PB
5938 if (pass == 0) {
5939 tmp2 = tmp;
5940 } else {
5941 neon_store_reg(rd, 0, tmp2);
5942 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5943 }
9ee6e8bb
PB
5944 }
5945 break;
600b828c 5946 case NEON_2RM_VSHLL:
fc2a9b37 5947 if (q || (rd & 1)) {
9ee6e8bb 5948 return 1;
600b828c 5949 }
ad69471c
PB
5950 tmp = neon_load_reg(rm, 0);
5951 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5952 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5953 if (pass == 1)
5954 tmp = tmp2;
5955 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5956 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5957 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5958 }
5959 break;
600b828c 5960 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5961 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5962 q || (rm & 1)) {
5963 return 1;
5964 }
7d1b0095
PM
5965 tmp = tcg_temp_new_i32();
5966 tmp2 = tcg_temp_new_i32();
60011498 5967 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5968 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5969 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5970 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5971 tcg_gen_shli_i32(tmp2, tmp2, 16);
5972 tcg_gen_or_i32(tmp2, tmp2, tmp);
5973 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5974 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5975 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5976 neon_store_reg(rd, 0, tmp2);
7d1b0095 5977 tmp2 = tcg_temp_new_i32();
2d981da7 5978 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5979 tcg_gen_shli_i32(tmp2, tmp2, 16);
5980 tcg_gen_or_i32(tmp2, tmp2, tmp);
5981 neon_store_reg(rd, 1, tmp2);
7d1b0095 5982 tcg_temp_free_i32(tmp);
60011498 5983 break;
600b828c 5984 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5985 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5986 q || (rd & 1)) {
5987 return 1;
5988 }
7d1b0095 5989 tmp3 = tcg_temp_new_i32();
60011498
PB
5990 tmp = neon_load_reg(rm, 0);
5991 tmp2 = neon_load_reg(rm, 1);
5992 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5993 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5994 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5995 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5996 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5997 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5998 tcg_temp_free_i32(tmp);
60011498 5999 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6000 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6001 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6002 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6003 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6004 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6005 tcg_temp_free_i32(tmp2);
6006 tcg_temp_free_i32(tmp3);
60011498 6007 break;
9ee6e8bb
PB
6008 default:
6009 elementwise:
6010 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6011 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6012 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6013 neon_reg_offset(rm, pass));
39d5492a 6014 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6015 } else {
dd8fbd78 6016 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6017 }
6018 switch (op) {
600b828c 6019 case NEON_2RM_VREV32:
9ee6e8bb 6020 switch (size) {
dd8fbd78
FN
6021 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6022 case 1: gen_swap_half(tmp); break;
600b828c 6023 default: abort();
9ee6e8bb
PB
6024 }
6025 break;
600b828c 6026 case NEON_2RM_VREV16:
dd8fbd78 6027 gen_rev16(tmp);
9ee6e8bb 6028 break;
600b828c 6029 case NEON_2RM_VCLS:
9ee6e8bb 6030 switch (size) {
dd8fbd78
FN
6031 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6032 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6033 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6034 default: abort();
9ee6e8bb
PB
6035 }
6036 break;
600b828c 6037 case NEON_2RM_VCLZ:
9ee6e8bb 6038 switch (size) {
dd8fbd78
FN
6039 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6040 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6041 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6042 default: abort();
9ee6e8bb
PB
6043 }
6044 break;
600b828c 6045 case NEON_2RM_VCNT:
dd8fbd78 6046 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6047 break;
600b828c 6048 case NEON_2RM_VMVN:
dd8fbd78 6049 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6050 break;
600b828c 6051 case NEON_2RM_VQABS:
9ee6e8bb 6052 switch (size) {
02da0b2d
PM
6053 case 0:
6054 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6055 break;
6056 case 1:
6057 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6058 break;
6059 case 2:
6060 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6061 break;
600b828c 6062 default: abort();
9ee6e8bb
PB
6063 }
6064 break;
600b828c 6065 case NEON_2RM_VQNEG:
9ee6e8bb 6066 switch (size) {
02da0b2d
PM
6067 case 0:
6068 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6069 break;
6070 case 1:
6071 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6072 break;
6073 case 2:
6074 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6075 break;
600b828c 6076 default: abort();
9ee6e8bb
PB
6077 }
6078 break;
600b828c 6079 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6080 tmp2 = tcg_const_i32(0);
9ee6e8bb 6081 switch(size) {
dd8fbd78
FN
6082 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6083 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6084 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6085 default: abort();
9ee6e8bb 6086 }
39d5492a 6087 tcg_temp_free_i32(tmp2);
600b828c 6088 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6089 tcg_gen_not_i32(tmp, tmp);
600b828c 6090 }
9ee6e8bb 6091 break;
600b828c 6092 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6093 tmp2 = tcg_const_i32(0);
9ee6e8bb 6094 switch(size) {
dd8fbd78
FN
6095 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6096 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6097 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6098 default: abort();
9ee6e8bb 6099 }
39d5492a 6100 tcg_temp_free_i32(tmp2);
600b828c 6101 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6102 tcg_gen_not_i32(tmp, tmp);
600b828c 6103 }
9ee6e8bb 6104 break;
600b828c 6105 case NEON_2RM_VCEQ0:
dd8fbd78 6106 tmp2 = tcg_const_i32(0);
9ee6e8bb 6107 switch(size) {
dd8fbd78
FN
6108 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6109 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6110 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6111 default: abort();
9ee6e8bb 6112 }
39d5492a 6113 tcg_temp_free_i32(tmp2);
9ee6e8bb 6114 break;
600b828c 6115 case NEON_2RM_VABS:
9ee6e8bb 6116 switch(size) {
dd8fbd78
FN
6117 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6118 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6119 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6120 default: abort();
9ee6e8bb
PB
6121 }
6122 break;
600b828c 6123 case NEON_2RM_VNEG:
dd8fbd78
FN
6124 tmp2 = tcg_const_i32(0);
6125 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6126 tcg_temp_free_i32(tmp2);
9ee6e8bb 6127 break;
600b828c 6128 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6129 {
6130 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6131 tmp2 = tcg_const_i32(0);
aa47cfdd 6132 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6133 tcg_temp_free_i32(tmp2);
aa47cfdd 6134 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6135 break;
aa47cfdd 6136 }
600b828c 6137 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6138 {
6139 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6140 tmp2 = tcg_const_i32(0);
aa47cfdd 6141 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6142 tcg_temp_free_i32(tmp2);
aa47cfdd 6143 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6144 break;
aa47cfdd 6145 }
600b828c 6146 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6147 {
6148 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6149 tmp2 = tcg_const_i32(0);
aa47cfdd 6150 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6151 tcg_temp_free_i32(tmp2);
aa47cfdd 6152 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6153 break;
aa47cfdd 6154 }
600b828c 6155 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6156 {
6157 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6158 tmp2 = tcg_const_i32(0);
aa47cfdd 6159 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6160 tcg_temp_free_i32(tmp2);
aa47cfdd 6161 tcg_temp_free_ptr(fpstatus);
0e326109 6162 break;
aa47cfdd 6163 }
600b828c 6164 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6165 {
6166 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6167 tmp2 = tcg_const_i32(0);
aa47cfdd 6168 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6169 tcg_temp_free_i32(tmp2);
aa47cfdd 6170 tcg_temp_free_ptr(fpstatus);
0e326109 6171 break;
aa47cfdd 6172 }
600b828c 6173 case NEON_2RM_VABS_F:
4373f3ce 6174 gen_vfp_abs(0);
9ee6e8bb 6175 break;
600b828c 6176 case NEON_2RM_VNEG_F:
4373f3ce 6177 gen_vfp_neg(0);
9ee6e8bb 6178 break;
600b828c 6179 case NEON_2RM_VSWP:
dd8fbd78
FN
6180 tmp2 = neon_load_reg(rd, pass);
6181 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6182 break;
600b828c 6183 case NEON_2RM_VTRN:
dd8fbd78 6184 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6185 switch (size) {
dd8fbd78
FN
6186 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6187 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6188 default: abort();
9ee6e8bb 6189 }
dd8fbd78 6190 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6191 break;
600b828c 6192 case NEON_2RM_VRECPE:
dd8fbd78 6193 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6194 break;
600b828c 6195 case NEON_2RM_VRSQRTE:
dd8fbd78 6196 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6197 break;
600b828c 6198 case NEON_2RM_VRECPE_F:
4373f3ce 6199 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6200 break;
600b828c 6201 case NEON_2RM_VRSQRTE_F:
4373f3ce 6202 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6203 break;
600b828c 6204 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6205 gen_vfp_sito(0, 1);
9ee6e8bb 6206 break;
600b828c 6207 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6208 gen_vfp_uito(0, 1);
9ee6e8bb 6209 break;
600b828c 6210 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6211 gen_vfp_tosiz(0, 1);
9ee6e8bb 6212 break;
600b828c 6213 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6214 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6215 break;
6216 default:
600b828c
PM
6217 /* Reserved op values were caught by the
6218 * neon_2rm_sizes[] check earlier.
6219 */
6220 abort();
9ee6e8bb 6221 }
600b828c 6222 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6223 tcg_gen_st_f32(cpu_F0s, cpu_env,
6224 neon_reg_offset(rd, pass));
9ee6e8bb 6225 } else {
dd8fbd78 6226 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6227 }
6228 }
6229 break;
6230 }
6231 } else if ((insn & (1 << 10)) == 0) {
6232 /* VTBL, VTBX. */
56907d77
PM
6233 int n = ((insn >> 8) & 3) + 1;
6234 if ((rn + n) > 32) {
6235 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6236 * helper function running off the end of the register file.
6237 */
6238 return 1;
6239 }
6240 n <<= 3;
9ee6e8bb 6241 if (insn & (1 << 6)) {
8f8e3aa4 6242 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6243 } else {
7d1b0095 6244 tmp = tcg_temp_new_i32();
8f8e3aa4 6245 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6246 }
8f8e3aa4 6247 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6248 tmp4 = tcg_const_i32(rn);
6249 tmp5 = tcg_const_i32(n);
9ef39277 6250 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6251 tcg_temp_free_i32(tmp);
9ee6e8bb 6252 if (insn & (1 << 6)) {
8f8e3aa4 6253 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6254 } else {
7d1b0095 6255 tmp = tcg_temp_new_i32();
8f8e3aa4 6256 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6257 }
8f8e3aa4 6258 tmp3 = neon_load_reg(rm, 1);
9ef39277 6259 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6260 tcg_temp_free_i32(tmp5);
6261 tcg_temp_free_i32(tmp4);
8f8e3aa4 6262 neon_store_reg(rd, 0, tmp2);
3018f259 6263 neon_store_reg(rd, 1, tmp3);
7d1b0095 6264 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6265 } else if ((insn & 0x380) == 0) {
6266 /* VDUP */
133da6aa
JR
6267 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6268 return 1;
6269 }
9ee6e8bb 6270 if (insn & (1 << 19)) {
dd8fbd78 6271 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6272 } else {
dd8fbd78 6273 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6274 }
6275 if (insn & (1 << 16)) {
dd8fbd78 6276 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6277 } else if (insn & (1 << 17)) {
6278 if ((insn >> 18) & 1)
dd8fbd78 6279 gen_neon_dup_high16(tmp);
9ee6e8bb 6280 else
dd8fbd78 6281 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6282 }
6283 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6284 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6285 tcg_gen_mov_i32(tmp2, tmp);
6286 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6287 }
7d1b0095 6288 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6289 } else {
6290 return 1;
6291 }
6292 }
6293 }
6294 return 0;
6295}
6296
/* Decode a coprocessor instruction (the MRC/MCR/MRRC/MCRR/CDP space).
 * Returns 0 if code was generated for the insn, nonzero if the insn
 * should UNDEF.
 */
static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    /* XScale gates coprocessor access via the CPAR register: a clear
     * bit for this coprocessor number means the access must UNDEF.
     */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
      case 0:
      case 1:
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    case 10:
    case 11:
	return disas_vfp_insn (env, s, insn);
    default:
        break;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the encoding fields; 64-bit (MRRC/MCRR) accesses have no
     * crn/opc2 and carry the second transfer register in bits [19:16].
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(cpu,
                            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(env, ri, isread)) {
            return 1;
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O-side-effecting registers must be bracketed by
         * gen_io_start/gen_io_end when icount is in use.
         */
        if (use_icount && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    /* PC must be up to date in case the readfn raises
                     * an exception.
                     */
                    gen_set_pc_im(s->pc);
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Low word to rt, high word to rt2 */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_set_pc_im(s->pc);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if (use_icount && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; UNDEF */
    return 1;
}
6482
5e3f878a
PB
6483
6484/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6485static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6486{
39d5492a 6487 TCGv_i32 tmp;
7d1b0095 6488 tmp = tcg_temp_new_i32();
5e3f878a
PB
6489 tcg_gen_trunc_i64_i32(tmp, val);
6490 store_reg(s, rlow, tmp);
7d1b0095 6491 tmp = tcg_temp_new_i32();
5e3f878a
PB
6492 tcg_gen_shri_i64(val, val, 32);
6493 tcg_gen_trunc_i64_i32(tmp, val);
6494 store_reg(s, rhigh, tmp);
6495}
6496
6497/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6498static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6499{
a7812ae4 6500 TCGv_i64 tmp;
39d5492a 6501 TCGv_i32 tmp2;
5e3f878a 6502
36aa55dc 6503 /* Load value and extend to 64 bits. */
a7812ae4 6504 tmp = tcg_temp_new_i64();
5e3f878a
PB
6505 tmp2 = load_reg(s, rlow);
6506 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6507 tcg_temp_free_i32(tmp2);
5e3f878a 6508 tcg_gen_add_i64(val, val, tmp);
b75263d6 6509 tcg_temp_free_i64(tmp);
5e3f878a
PB
6510}
6511
6512/* load and add a 64-bit value from a register pair. */
a7812ae4 6513static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6514{
a7812ae4 6515 TCGv_i64 tmp;
39d5492a
PM
6516 TCGv_i32 tmpl;
6517 TCGv_i32 tmph;
5e3f878a
PB
6518
6519 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6520 tmpl = load_reg(s, rlow);
6521 tmph = load_reg(s, rhigh);
a7812ae4 6522 tmp = tcg_temp_new_i64();
36aa55dc 6523 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6524 tcg_temp_free_i32(tmpl);
6525 tcg_temp_free_i32(tmph);
5e3f878a 6526 tcg_gen_add_i64(val, val, tmp);
b75263d6 6527 tcg_temp_free_i64(tmp);
5e3f878a
PB
6528}
6529
c9f10124 6530/* Set N and Z flags from hi|lo. */
39d5492a 6531static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6532{
c9f10124
RH
6533 tcg_gen_mov_i32(cpu_NF, hi);
6534 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6535}
6536
426f5abc
PB
6537/* Load/Store exclusive instructions are implemented by remembering
6538 the value/address loaded, and seeing if these are the same
b90372ad 6539 when the store is performed. This should be sufficient to implement
426f5abc
PB
6540 the architecturally mandated semantics, and avoids having to monitor
6541 regular stores.
6542
6543 In system emulation mode only one CPU will be running at once, so
6544 this sequence is effectively atomic. In user emulation mode we
6545 throw an exception and handle the atomic operation elsewhere. */
/* Emit code for LDREX{B,H,,D}: load from @addr into rt (and rt2 for
 * size 3) and record the address and value(s) in the exclusive-monitor
 * state for the matching STREX to check.
 * size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Remember the loaded value so gen_store_exclusive can compare it. */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* LDREXD: second word comes from addr + 4 and goes to rt2. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = tcg_temp_new_i32();
        gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Arm the monitor on this address. */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
6578
/* CLREX: clear the exclusive monitor.  An exclusive address of -1
 * never matches, so any subsequent STREX will fail.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6583
6584#ifdef CONFIG_USER_ONLY
/* User-mode STREX: the check-and-store cannot be done in-line here, so
 * stash the address and operand registers in the CPU state and raise
 * EXCP_STREX; the usermode exception handler completes the operation.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    /* Pack size and the three register numbers for the handler. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
6593#else
/* System-emulation STREX: re-load the value at @addr and compare both
 * the address and the value against the state recorded by the matching
 * LDREX; store and set rd = 0 on match, otherwise set rd = 1.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail fast if the monitored address does not match. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Fail if the memory contents changed since the LDREX. */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* STREXD: also check the high word at addr + 4. */
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = tcg_temp_new_i32();
        gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* All checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, IS_USER(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* The monitor is cleared whether the STREX succeeded or failed. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6665#endif
6666
81465888
PM
/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 * Stores LR and SPSR of the current mode to the stack of @mode, and
 * optionally writes the adjusted stack pointer back to that mode's r13.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 tmp = tcg_const_i32(mode);
    /* Fetch the banked r13 (stack pointer) for the target mode. */
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    /* Offset from SP to the lower of the two stored words. */
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
    tcg_gen_addi_i32(addr, addr, offset);
    /* Store LR, then SPSR at the next word up. */
    tmp = load_reg(s, 14);
    gen_aa32_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    tmp = load_cpu_field(spsr);
    tcg_gen_addi_i32(addr, addr, 4);
    gen_aa32_st32(tmp, addr, 0);
    tcg_temp_free_i32(tmp);
    if (writeback) {
        /* Adjustment from the second store address (addr now points at
         * the SPSR slot) to the final SP value for this amode.
         */
        switch (amode) {
        case 0:
            offset = -8;
            break;
        case 1:
            offset = 4;
            break;
        case 2:
            offset = -4;
            break;
        case 3:
            offset = 0;
            break;
        default:
            abort();
        }
        tcg_gen_addi_i32(addr, addr, offset);
        tmp = tcg_const_i32(mode);
        gen_helper_set_r13_banked(cpu_env, tmp, addr);
        tcg_temp_free_i32(tmp);
    }
    tcg_temp_free_i32(addr);
}
6732
0ecb72a5 6733static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6734{
6735 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
6736 TCGv_i32 tmp;
6737 TCGv_i32 tmp2;
6738 TCGv_i32 tmp3;
6739 TCGv_i32 addr;
a7812ae4 6740 TCGv_i64 tmp64;
9ee6e8bb 6741
d31dd73e 6742 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6743 s->pc += 4;
6744
6745 /* M variants do not implement ARM mode. */
6746 if (IS_M(env))
6747 goto illegal_op;
6748 cond = insn >> 28;
6749 if (cond == 0xf){
be5e7a76
DES
6750 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6751 * choose to UNDEF. In ARMv5 and above the space is used
6752 * for miscellaneous unconditional instructions.
6753 */
6754 ARCH(5);
6755
9ee6e8bb
PB
6756 /* Unconditional instructions. */
6757 if (((insn >> 25) & 7) == 1) {
6758 /* NEON Data processing. */
6759 if (!arm_feature(env, ARM_FEATURE_NEON))
6760 goto illegal_op;
6761
6762 if (disas_neon_data_insn(env, s, insn))
6763 goto illegal_op;
6764 return;
6765 }
6766 if ((insn & 0x0f100000) == 0x04000000) {
6767 /* NEON load/store. */
6768 if (!arm_feature(env, ARM_FEATURE_NEON))
6769 goto illegal_op;
6770
6771 if (disas_neon_ls_insn(env, s, insn))
6772 goto illegal_op;
6773 return;
6774 }
3d185e5d
PM
6775 if (((insn & 0x0f30f000) == 0x0510f000) ||
6776 ((insn & 0x0f30f010) == 0x0710f000)) {
6777 if ((insn & (1 << 22)) == 0) {
6778 /* PLDW; v7MP */
6779 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6780 goto illegal_op;
6781 }
6782 }
6783 /* Otherwise PLD; v5TE+ */
be5e7a76 6784 ARCH(5TE);
3d185e5d
PM
6785 return;
6786 }
6787 if (((insn & 0x0f70f000) == 0x0450f000) ||
6788 ((insn & 0x0f70f010) == 0x0650f000)) {
6789 ARCH(7);
6790 return; /* PLI; V7 */
6791 }
6792 if (((insn & 0x0f700000) == 0x04100000) ||
6793 ((insn & 0x0f700010) == 0x06100000)) {
6794 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6795 goto illegal_op;
6796 }
6797 return; /* v7MP: Unallocated memory hint: must NOP */
6798 }
6799
6800 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6801 ARCH(6);
6802 /* setend */
10962fd5
PM
6803 if (((insn >> 9) & 1) != s->bswap_code) {
6804 /* Dynamic endianness switching not implemented. */
e0c270d9 6805 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
6806 goto illegal_op;
6807 }
6808 return;
6809 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6810 switch ((insn >> 4) & 0xf) {
6811 case 1: /* clrex */
6812 ARCH(6K);
426f5abc 6813 gen_clrex(s);
9ee6e8bb
PB
6814 return;
6815 case 4: /* dsb */
6816 case 5: /* dmb */
6817 case 6: /* isb */
6818 ARCH(7);
6819 /* We don't emulate caches so these are a no-op. */
6820 return;
6821 default:
6822 goto illegal_op;
6823 }
6824 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6825 /* srs */
81465888 6826 if (IS_USER(s)) {
9ee6e8bb 6827 goto illegal_op;
9ee6e8bb 6828 }
81465888
PM
6829 ARCH(6);
6830 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 6831 return;
ea825eee 6832 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6833 /* rfe */
c67b6b71 6834 int32_t offset;
9ee6e8bb
PB
6835 if (IS_USER(s))
6836 goto illegal_op;
6837 ARCH(6);
6838 rn = (insn >> 16) & 0xf;
b0109805 6839 addr = load_reg(s, rn);
9ee6e8bb
PB
6840 i = (insn >> 23) & 3;
6841 switch (i) {
b0109805 6842 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6843 case 1: offset = 0; break; /* IA */
6844 case 2: offset = -8; break; /* DB */
b0109805 6845 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6846 default: abort();
6847 }
6848 if (offset)
b0109805
PB
6849 tcg_gen_addi_i32(addr, addr, offset);
6850 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 6851 tmp = tcg_temp_new_i32();
08307563 6852 gen_aa32_ld32u(tmp, addr, 0);
b0109805 6853 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 6854 tmp2 = tcg_temp_new_i32();
08307563 6855 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
6856 if (insn & (1 << 21)) {
6857 /* Base writeback. */
6858 switch (i) {
b0109805 6859 case 0: offset = -8; break;
c67b6b71
FN
6860 case 1: offset = 4; break;
6861 case 2: offset = -4; break;
b0109805 6862 case 3: offset = 0; break;
9ee6e8bb
PB
6863 default: abort();
6864 }
6865 if (offset)
b0109805
PB
6866 tcg_gen_addi_i32(addr, addr, offset);
6867 store_reg(s, rn, addr);
6868 } else {
7d1b0095 6869 tcg_temp_free_i32(addr);
9ee6e8bb 6870 }
b0109805 6871 gen_rfe(s, tmp, tmp2);
c67b6b71 6872 return;
9ee6e8bb
PB
6873 } else if ((insn & 0x0e000000) == 0x0a000000) {
6874 /* branch link and change to thumb (blx <offset>) */
6875 int32_t offset;
6876
6877 val = (uint32_t)s->pc;
7d1b0095 6878 tmp = tcg_temp_new_i32();
d9ba4830
PB
6879 tcg_gen_movi_i32(tmp, val);
6880 store_reg(s, 14, tmp);
9ee6e8bb
PB
6881 /* Sign-extend the 24-bit offset */
6882 offset = (((int32_t)insn) << 8) >> 8;
6883 /* offset * 4 + bit24 * 2 + (thumb bit) */
6884 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6885 /* pipeline offset */
6886 val += 4;
be5e7a76 6887 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6888 gen_bx_im(s, val);
9ee6e8bb
PB
6889 return;
6890 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6891 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6892 /* iWMMXt register transfer. */
6893 if (env->cp15.c15_cpar & (1 << 1))
6894 if (!disas_iwmmxt_insn(env, s, insn))
6895 return;
6896 }
6897 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6898 /* Coprocessor double register transfer. */
be5e7a76 6899 ARCH(5TE);
9ee6e8bb
PB
6900 } else if ((insn & 0x0f000010) == 0x0e000010) {
6901 /* Additional coprocessor register transfer. */
7997d92f 6902 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6903 uint32_t mask;
6904 uint32_t val;
6905 /* cps (privileged) */
6906 if (IS_USER(s))
6907 return;
6908 mask = val = 0;
6909 if (insn & (1 << 19)) {
6910 if (insn & (1 << 8))
6911 mask |= CPSR_A;
6912 if (insn & (1 << 7))
6913 mask |= CPSR_I;
6914 if (insn & (1 << 6))
6915 mask |= CPSR_F;
6916 if (insn & (1 << 18))
6917 val |= mask;
6918 }
7997d92f 6919 if (insn & (1 << 17)) {
9ee6e8bb
PB
6920 mask |= CPSR_M;
6921 val |= (insn & 0x1f);
6922 }
6923 if (mask) {
2fbac54b 6924 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6925 }
6926 return;
6927 }
6928 goto illegal_op;
6929 }
6930 if (cond != 0xe) {
6931 /* if not always execute, we generate a conditional jump to
6932 next instruction */
6933 s->condlabel = gen_new_label();
d9ba4830 6934 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6935 s->condjmp = 1;
6936 }
6937 if ((insn & 0x0f900000) == 0x03000000) {
6938 if ((insn & (1 << 21)) == 0) {
6939 ARCH(6T2);
6940 rd = (insn >> 12) & 0xf;
6941 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6942 if ((insn & (1 << 22)) == 0) {
6943 /* MOVW */
7d1b0095 6944 tmp = tcg_temp_new_i32();
5e3f878a 6945 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6946 } else {
6947 /* MOVT */
5e3f878a 6948 tmp = load_reg(s, rd);
86831435 6949 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6950 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6951 }
5e3f878a 6952 store_reg(s, rd, tmp);
9ee6e8bb
PB
6953 } else {
6954 if (((insn >> 12) & 0xf) != 0xf)
6955 goto illegal_op;
6956 if (((insn >> 16) & 0xf) == 0) {
6957 gen_nop_hint(s, insn & 0xff);
6958 } else {
6959 /* CPSR = immediate */
6960 val = insn & 0xff;
6961 shift = ((insn >> 8) & 0xf) * 2;
6962 if (shift)
6963 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6964 i = ((insn & (1 << 22)) != 0);
2fbac54b 6965 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6966 goto illegal_op;
6967 }
6968 }
6969 } else if ((insn & 0x0f900000) == 0x01000000
6970 && (insn & 0x00000090) != 0x00000090) {
6971 /* miscellaneous instructions */
6972 op1 = (insn >> 21) & 3;
6973 sh = (insn >> 4) & 0xf;
6974 rm = insn & 0xf;
6975 switch (sh) {
6976 case 0x0: /* move program status register */
6977 if (op1 & 1) {
6978 /* PSR = reg */
2fbac54b 6979 tmp = load_reg(s, rm);
9ee6e8bb 6980 i = ((op1 & 2) != 0);
2fbac54b 6981 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6982 goto illegal_op;
6983 } else {
6984 /* reg = PSR */
6985 rd = (insn >> 12) & 0xf;
6986 if (op1 & 2) {
6987 if (IS_USER(s))
6988 goto illegal_op;
d9ba4830 6989 tmp = load_cpu_field(spsr);
9ee6e8bb 6990 } else {
7d1b0095 6991 tmp = tcg_temp_new_i32();
9ef39277 6992 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6993 }
d9ba4830 6994 store_reg(s, rd, tmp);
9ee6e8bb
PB
6995 }
6996 break;
6997 case 0x1:
6998 if (op1 == 1) {
6999 /* branch/exchange thumb (bx). */
be5e7a76 7000 ARCH(4T);
d9ba4830
PB
7001 tmp = load_reg(s, rm);
7002 gen_bx(s, tmp);
9ee6e8bb
PB
7003 } else if (op1 == 3) {
7004 /* clz */
be5e7a76 7005 ARCH(5);
9ee6e8bb 7006 rd = (insn >> 12) & 0xf;
1497c961
PB
7007 tmp = load_reg(s, rm);
7008 gen_helper_clz(tmp, tmp);
7009 store_reg(s, rd, tmp);
9ee6e8bb
PB
7010 } else {
7011 goto illegal_op;
7012 }
7013 break;
7014 case 0x2:
7015 if (op1 == 1) {
7016 ARCH(5J); /* bxj */
7017 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7018 tmp = load_reg(s, rm);
7019 gen_bx(s, tmp);
9ee6e8bb
PB
7020 } else {
7021 goto illegal_op;
7022 }
7023 break;
7024 case 0x3:
7025 if (op1 != 1)
7026 goto illegal_op;
7027
be5e7a76 7028 ARCH(5);
9ee6e8bb 7029 /* branch link/exchange thumb (blx) */
d9ba4830 7030 tmp = load_reg(s, rm);
7d1b0095 7031 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7032 tcg_gen_movi_i32(tmp2, s->pc);
7033 store_reg(s, 14, tmp2);
7034 gen_bx(s, tmp);
9ee6e8bb
PB
7035 break;
7036 case 0x5: /* saturating add/subtract */
be5e7a76 7037 ARCH(5TE);
9ee6e8bb
PB
7038 rd = (insn >> 12) & 0xf;
7039 rn = (insn >> 16) & 0xf;
b40d0353 7040 tmp = load_reg(s, rm);
5e3f878a 7041 tmp2 = load_reg(s, rn);
9ee6e8bb 7042 if (op1 & 2)
9ef39277 7043 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7044 if (op1 & 1)
9ef39277 7045 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7046 else
9ef39277 7047 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7048 tcg_temp_free_i32(tmp2);
5e3f878a 7049 store_reg(s, rd, tmp);
9ee6e8bb 7050 break;
49e14940
AL
7051 case 7:
7052 /* SMC instruction (op1 == 3)
7053 and undefined instructions (op1 == 0 || op1 == 2)
7054 will trap */
7055 if (op1 != 1) {
7056 goto illegal_op;
7057 }
7058 /* bkpt */
be5e7a76 7059 ARCH(5);
bc4a0de0 7060 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7061 break;
7062 case 0x8: /* signed multiply */
7063 case 0xa:
7064 case 0xc:
7065 case 0xe:
be5e7a76 7066 ARCH(5TE);
9ee6e8bb
PB
7067 rs = (insn >> 8) & 0xf;
7068 rn = (insn >> 12) & 0xf;
7069 rd = (insn >> 16) & 0xf;
7070 if (op1 == 1) {
7071 /* (32 * 16) >> 16 */
5e3f878a
PB
7072 tmp = load_reg(s, rm);
7073 tmp2 = load_reg(s, rs);
9ee6e8bb 7074 if (sh & 4)
5e3f878a 7075 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7076 else
5e3f878a 7077 gen_sxth(tmp2);
a7812ae4
PB
7078 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7079 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7080 tmp = tcg_temp_new_i32();
a7812ae4 7081 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7082 tcg_temp_free_i64(tmp64);
9ee6e8bb 7083 if ((sh & 2) == 0) {
5e3f878a 7084 tmp2 = load_reg(s, rn);
9ef39277 7085 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7086 tcg_temp_free_i32(tmp2);
9ee6e8bb 7087 }
5e3f878a 7088 store_reg(s, rd, tmp);
9ee6e8bb
PB
7089 } else {
7090 /* 16 * 16 */
5e3f878a
PB
7091 tmp = load_reg(s, rm);
7092 tmp2 = load_reg(s, rs);
7093 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7094 tcg_temp_free_i32(tmp2);
9ee6e8bb 7095 if (op1 == 2) {
a7812ae4
PB
7096 tmp64 = tcg_temp_new_i64();
7097 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7098 tcg_temp_free_i32(tmp);
a7812ae4
PB
7099 gen_addq(s, tmp64, rn, rd);
7100 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7101 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7102 } else {
7103 if (op1 == 0) {
5e3f878a 7104 tmp2 = load_reg(s, rn);
9ef39277 7105 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7106 tcg_temp_free_i32(tmp2);
9ee6e8bb 7107 }
5e3f878a 7108 store_reg(s, rd, tmp);
9ee6e8bb
PB
7109 }
7110 }
7111 break;
7112 default:
7113 goto illegal_op;
7114 }
7115 } else if (((insn & 0x0e000000) == 0 &&
7116 (insn & 0x00000090) != 0x90) ||
7117 ((insn & 0x0e000000) == (1 << 25))) {
7118 int set_cc, logic_cc, shiftop;
7119
7120 op1 = (insn >> 21) & 0xf;
7121 set_cc = (insn >> 20) & 1;
7122 logic_cc = table_logic_cc[op1] & set_cc;
7123
7124 /* data processing instruction */
7125 if (insn & (1 << 25)) {
7126 /* immediate operand */
7127 val = insn & 0xff;
7128 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7129 if (shift) {
9ee6e8bb 7130 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7131 }
7d1b0095 7132 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7133 tcg_gen_movi_i32(tmp2, val);
7134 if (logic_cc && shift) {
7135 gen_set_CF_bit31(tmp2);
7136 }
9ee6e8bb
PB
7137 } else {
7138 /* register */
7139 rm = (insn) & 0xf;
e9bb4aa9 7140 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7141 shiftop = (insn >> 5) & 3;
7142 if (!(insn & (1 << 4))) {
7143 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7144 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7145 } else {
7146 rs = (insn >> 8) & 0xf;
8984bd2e 7147 tmp = load_reg(s, rs);
e9bb4aa9 7148 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7149 }
7150 }
7151 if (op1 != 0x0f && op1 != 0x0d) {
7152 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7153 tmp = load_reg(s, rn);
7154 } else {
39d5492a 7155 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7156 }
7157 rd = (insn >> 12) & 0xf;
7158 switch(op1) {
7159 case 0x00:
e9bb4aa9
JR
7160 tcg_gen_and_i32(tmp, tmp, tmp2);
7161 if (logic_cc) {
7162 gen_logic_CC(tmp);
7163 }
21aeb343 7164 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7165 break;
7166 case 0x01:
e9bb4aa9
JR
7167 tcg_gen_xor_i32(tmp, tmp, tmp2);
7168 if (logic_cc) {
7169 gen_logic_CC(tmp);
7170 }
21aeb343 7171 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7172 break;
7173 case 0x02:
7174 if (set_cc && rd == 15) {
7175 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7176 if (IS_USER(s)) {
9ee6e8bb 7177 goto illegal_op;
e9bb4aa9 7178 }
72485ec4 7179 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7180 gen_exception_return(s, tmp);
9ee6e8bb 7181 } else {
e9bb4aa9 7182 if (set_cc) {
72485ec4 7183 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7184 } else {
7185 tcg_gen_sub_i32(tmp, tmp, tmp2);
7186 }
21aeb343 7187 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7188 }
7189 break;
7190 case 0x03:
e9bb4aa9 7191 if (set_cc) {
72485ec4 7192 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7193 } else {
7194 tcg_gen_sub_i32(tmp, tmp2, tmp);
7195 }
21aeb343 7196 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7197 break;
7198 case 0x04:
e9bb4aa9 7199 if (set_cc) {
72485ec4 7200 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7201 } else {
7202 tcg_gen_add_i32(tmp, tmp, tmp2);
7203 }
21aeb343 7204 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7205 break;
7206 case 0x05:
e9bb4aa9 7207 if (set_cc) {
49b4c31e 7208 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7209 } else {
7210 gen_add_carry(tmp, tmp, tmp2);
7211 }
21aeb343 7212 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7213 break;
7214 case 0x06:
e9bb4aa9 7215 if (set_cc) {
2de68a49 7216 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7217 } else {
7218 gen_sub_carry(tmp, tmp, tmp2);
7219 }
21aeb343 7220 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7221 break;
7222 case 0x07:
e9bb4aa9 7223 if (set_cc) {
2de68a49 7224 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7225 } else {
7226 gen_sub_carry(tmp, tmp2, tmp);
7227 }
21aeb343 7228 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7229 break;
7230 case 0x08:
7231 if (set_cc) {
e9bb4aa9
JR
7232 tcg_gen_and_i32(tmp, tmp, tmp2);
7233 gen_logic_CC(tmp);
9ee6e8bb 7234 }
7d1b0095 7235 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7236 break;
7237 case 0x09:
7238 if (set_cc) {
e9bb4aa9
JR
7239 tcg_gen_xor_i32(tmp, tmp, tmp2);
7240 gen_logic_CC(tmp);
9ee6e8bb 7241 }
7d1b0095 7242 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7243 break;
7244 case 0x0a:
7245 if (set_cc) {
72485ec4 7246 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7247 }
7d1b0095 7248 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7249 break;
7250 case 0x0b:
7251 if (set_cc) {
72485ec4 7252 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7253 }
7d1b0095 7254 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7255 break;
7256 case 0x0c:
e9bb4aa9
JR
7257 tcg_gen_or_i32(tmp, tmp, tmp2);
7258 if (logic_cc) {
7259 gen_logic_CC(tmp);
7260 }
21aeb343 7261 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7262 break;
7263 case 0x0d:
7264 if (logic_cc && rd == 15) {
7265 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7266 if (IS_USER(s)) {
9ee6e8bb 7267 goto illegal_op;
e9bb4aa9
JR
7268 }
7269 gen_exception_return(s, tmp2);
9ee6e8bb 7270 } else {
e9bb4aa9
JR
7271 if (logic_cc) {
7272 gen_logic_CC(tmp2);
7273 }
21aeb343 7274 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7275 }
7276 break;
7277 case 0x0e:
f669df27 7278 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7279 if (logic_cc) {
7280 gen_logic_CC(tmp);
7281 }
21aeb343 7282 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7283 break;
7284 default:
7285 case 0x0f:
e9bb4aa9
JR
7286 tcg_gen_not_i32(tmp2, tmp2);
7287 if (logic_cc) {
7288 gen_logic_CC(tmp2);
7289 }
21aeb343 7290 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7291 break;
7292 }
e9bb4aa9 7293 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7294 tcg_temp_free_i32(tmp2);
e9bb4aa9 7295 }
9ee6e8bb
PB
7296 } else {
7297 /* other instructions */
7298 op1 = (insn >> 24) & 0xf;
7299 switch(op1) {
7300 case 0x0:
7301 case 0x1:
7302 /* multiplies, extra load/stores */
7303 sh = (insn >> 5) & 3;
7304 if (sh == 0) {
7305 if (op1 == 0x0) {
7306 rd = (insn >> 16) & 0xf;
7307 rn = (insn >> 12) & 0xf;
7308 rs = (insn >> 8) & 0xf;
7309 rm = (insn) & 0xf;
7310 op1 = (insn >> 20) & 0xf;
7311 switch (op1) {
7312 case 0: case 1: case 2: case 3: case 6:
7313 /* 32 bit mul */
5e3f878a
PB
7314 tmp = load_reg(s, rs);
7315 tmp2 = load_reg(s, rm);
7316 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7317 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7318 if (insn & (1 << 22)) {
7319 /* Subtract (mls) */
7320 ARCH(6T2);
5e3f878a
PB
7321 tmp2 = load_reg(s, rn);
7322 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7323 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7324 } else if (insn & (1 << 21)) {
7325 /* Add */
5e3f878a
PB
7326 tmp2 = load_reg(s, rn);
7327 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7328 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7329 }
7330 if (insn & (1 << 20))
5e3f878a
PB
7331 gen_logic_CC(tmp);
7332 store_reg(s, rd, tmp);
9ee6e8bb 7333 break;
8aac08b1
AJ
7334 case 4:
7335 /* 64 bit mul double accumulate (UMAAL) */
7336 ARCH(6);
7337 tmp = load_reg(s, rs);
7338 tmp2 = load_reg(s, rm);
7339 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7340 gen_addq_lo(s, tmp64, rn);
7341 gen_addq_lo(s, tmp64, rd);
7342 gen_storeq_reg(s, rn, rd, tmp64);
7343 tcg_temp_free_i64(tmp64);
7344 break;
7345 case 8: case 9: case 10: case 11:
7346 case 12: case 13: case 14: case 15:
7347 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7348 tmp = load_reg(s, rs);
7349 tmp2 = load_reg(s, rm);
8aac08b1 7350 if (insn & (1 << 22)) {
c9f10124 7351 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7352 } else {
c9f10124 7353 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7354 }
7355 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7356 TCGv_i32 al = load_reg(s, rn);
7357 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7358 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7359 tcg_temp_free_i32(al);
7360 tcg_temp_free_i32(ah);
9ee6e8bb 7361 }
8aac08b1 7362 if (insn & (1 << 20)) {
c9f10124 7363 gen_logicq_cc(tmp, tmp2);
8aac08b1 7364 }
c9f10124
RH
7365 store_reg(s, rn, tmp);
7366 store_reg(s, rd, tmp2);
9ee6e8bb 7367 break;
8aac08b1
AJ
7368 default:
7369 goto illegal_op;
9ee6e8bb
PB
7370 }
7371 } else {
7372 rn = (insn >> 16) & 0xf;
7373 rd = (insn >> 12) & 0xf;
7374 if (insn & (1 << 23)) {
7375 /* load/store exclusive */
2359bf80 7376 int op2 = (insn >> 8) & 3;
86753403 7377 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7378
7379 switch (op2) {
7380 case 0: /* lda/stl */
7381 if (op1 == 1) {
7382 goto illegal_op;
7383 }
7384 ARCH(8);
7385 break;
7386 case 1: /* reserved */
7387 goto illegal_op;
7388 case 2: /* ldaex/stlex */
7389 ARCH(8);
7390 break;
7391 case 3: /* ldrex/strex */
7392 if (op1) {
7393 ARCH(6K);
7394 } else {
7395 ARCH(6);
7396 }
7397 break;
7398 }
7399
3174f8e9 7400 addr = tcg_temp_local_new_i32();
98a46317 7401 load_reg_var(s, addr, rn);
2359bf80
MR
7402
7403 /* Since the emulation does not have barriers,
7404 the acquire/release semantics need no special
7405 handling */
7406 if (op2 == 0) {
7407 if (insn & (1 << 20)) {
7408 tmp = tcg_temp_new_i32();
7409 switch (op1) {
7410 case 0: /* lda */
08307563 7411 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
7412 break;
7413 case 2: /* ldab */
08307563 7414 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
7415 break;
7416 case 3: /* ldah */
08307563 7417 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
7418 break;
7419 default:
7420 abort();
7421 }
7422 store_reg(s, rd, tmp);
7423 } else {
7424 rm = insn & 0xf;
7425 tmp = load_reg(s, rm);
7426 switch (op1) {
7427 case 0: /* stl */
08307563 7428 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
7429 break;
7430 case 2: /* stlb */
08307563 7431 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
7432 break;
7433 case 3: /* stlh */
08307563 7434 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
7435 break;
7436 default:
7437 abort();
7438 }
7439 tcg_temp_free_i32(tmp);
7440 }
7441 } else if (insn & (1 << 20)) {
86753403
PB
7442 switch (op1) {
7443 case 0: /* ldrex */
426f5abc 7444 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7445 break;
7446 case 1: /* ldrexd */
426f5abc 7447 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7448 break;
7449 case 2: /* ldrexb */
426f5abc 7450 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7451 break;
7452 case 3: /* ldrexh */
426f5abc 7453 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7454 break;
7455 default:
7456 abort();
7457 }
9ee6e8bb
PB
7458 } else {
7459 rm = insn & 0xf;
86753403
PB
7460 switch (op1) {
7461 case 0: /* strex */
426f5abc 7462 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7463 break;
7464 case 1: /* strexd */
502e64fe 7465 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7466 break;
7467 case 2: /* strexb */
426f5abc 7468 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7469 break;
7470 case 3: /* strexh */
426f5abc 7471 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7472 break;
7473 default:
7474 abort();
7475 }
9ee6e8bb 7476 }
39d5492a 7477 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7478 } else {
7479 /* SWP instruction */
7480 rm = (insn) & 0xf;
7481
8984bd2e
PB
7482 /* ??? This is not really atomic. However we know
7483 we never have multiple CPUs running in parallel,
7484 so it is good enough. */
7485 addr = load_reg(s, rn);
7486 tmp = load_reg(s, rm);
5a839c0d 7487 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7488 if (insn & (1 << 22)) {
08307563
PM
7489 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
7490 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7491 } else {
08307563
PM
7492 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
7493 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7494 }
5a839c0d 7495 tcg_temp_free_i32(tmp);
7d1b0095 7496 tcg_temp_free_i32(addr);
8984bd2e 7497 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7498 }
7499 }
7500 } else {
7501 int address_offset;
7502 int load;
7503 /* Misc load/store */
7504 rn = (insn >> 16) & 0xf;
7505 rd = (insn >> 12) & 0xf;
b0109805 7506 addr = load_reg(s, rn);
9ee6e8bb 7507 if (insn & (1 << 24))
b0109805 7508 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7509 address_offset = 0;
7510 if (insn & (1 << 20)) {
7511 /* load */
5a839c0d 7512 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7513 switch(sh) {
7514 case 1:
08307563 7515 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7516 break;
7517 case 2:
08307563 7518 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7519 break;
7520 default:
7521 case 3:
08307563 7522 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7523 break;
7524 }
7525 load = 1;
7526 } else if (sh & 2) {
be5e7a76 7527 ARCH(5TE);
9ee6e8bb
PB
7528 /* doubleword */
7529 if (sh & 1) {
7530 /* store */
b0109805 7531 tmp = load_reg(s, rd);
08307563 7532 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7533 tcg_temp_free_i32(tmp);
b0109805
PB
7534 tcg_gen_addi_i32(addr, addr, 4);
7535 tmp = load_reg(s, rd + 1);
08307563 7536 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7537 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7538 load = 0;
7539 } else {
7540 /* load */
5a839c0d 7541 tmp = tcg_temp_new_i32();
08307563 7542 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7543 store_reg(s, rd, tmp);
7544 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7545 tmp = tcg_temp_new_i32();
08307563 7546 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7547 rd++;
7548 load = 1;
7549 }
7550 address_offset = -4;
7551 } else {
7552 /* store */
b0109805 7553 tmp = load_reg(s, rd);
08307563 7554 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 7555 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7556 load = 0;
7557 }
7558 /* Perform base writeback before the loaded value to
7559 ensure correct behavior with overlapping index registers.
7560 ldrd with base writeback is is undefined if the
7561 destination and index registers overlap. */
7562 if (!(insn & (1 << 24))) {
b0109805
PB
7563 gen_add_datah_offset(s, insn, address_offset, addr);
7564 store_reg(s, rn, addr);
9ee6e8bb
PB
7565 } else if (insn & (1 << 21)) {
7566 if (address_offset)
b0109805
PB
7567 tcg_gen_addi_i32(addr, addr, address_offset);
7568 store_reg(s, rn, addr);
7569 } else {
7d1b0095 7570 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7571 }
7572 if (load) {
7573 /* Complete the load. */
b0109805 7574 store_reg(s, rd, tmp);
9ee6e8bb
PB
7575 }
7576 }
7577 break;
7578 case 0x4:
7579 case 0x5:
7580 goto do_ldst;
7581 case 0x6:
7582 case 0x7:
7583 if (insn & (1 << 4)) {
7584 ARCH(6);
7585 /* Armv6 Media instructions. */
7586 rm = insn & 0xf;
7587 rn = (insn >> 16) & 0xf;
2c0262af 7588 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7589 rs = (insn >> 8) & 0xf;
7590 switch ((insn >> 23) & 3) {
7591 case 0: /* Parallel add/subtract. */
7592 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7593 tmp = load_reg(s, rn);
7594 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7595 sh = (insn >> 5) & 7;
7596 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7597 goto illegal_op;
6ddbc6e4 7598 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7599 tcg_temp_free_i32(tmp2);
6ddbc6e4 7600 store_reg(s, rd, tmp);
9ee6e8bb
PB
7601 break;
7602 case 1:
7603 if ((insn & 0x00700020) == 0) {
6c95676b 7604 /* Halfword pack. */
3670669c
PB
7605 tmp = load_reg(s, rn);
7606 tmp2 = load_reg(s, rm);
9ee6e8bb 7607 shift = (insn >> 7) & 0x1f;
3670669c
PB
7608 if (insn & (1 << 6)) {
7609 /* pkhtb */
22478e79
AZ
7610 if (shift == 0)
7611 shift = 31;
7612 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7613 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7614 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7615 } else {
7616 /* pkhbt */
22478e79
AZ
7617 if (shift)
7618 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7619 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7620 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7621 }
7622 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7623 tcg_temp_free_i32(tmp2);
3670669c 7624 store_reg(s, rd, tmp);
9ee6e8bb
PB
7625 } else if ((insn & 0x00200020) == 0x00200000) {
7626 /* [us]sat */
6ddbc6e4 7627 tmp = load_reg(s, rm);
9ee6e8bb
PB
7628 shift = (insn >> 7) & 0x1f;
7629 if (insn & (1 << 6)) {
7630 if (shift == 0)
7631 shift = 31;
6ddbc6e4 7632 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7633 } else {
6ddbc6e4 7634 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7635 }
7636 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7637 tmp2 = tcg_const_i32(sh);
7638 if (insn & (1 << 22))
9ef39277 7639 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7640 else
9ef39277 7641 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7642 tcg_temp_free_i32(tmp2);
6ddbc6e4 7643 store_reg(s, rd, tmp);
9ee6e8bb
PB
7644 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7645 /* [us]sat16 */
6ddbc6e4 7646 tmp = load_reg(s, rm);
9ee6e8bb 7647 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7648 tmp2 = tcg_const_i32(sh);
7649 if (insn & (1 << 22))
9ef39277 7650 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7651 else
9ef39277 7652 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7653 tcg_temp_free_i32(tmp2);
6ddbc6e4 7654 store_reg(s, rd, tmp);
9ee6e8bb
PB
7655 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7656 /* Select bytes. */
6ddbc6e4
PB
7657 tmp = load_reg(s, rn);
7658 tmp2 = load_reg(s, rm);
7d1b0095 7659 tmp3 = tcg_temp_new_i32();
0ecb72a5 7660 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7661 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7662 tcg_temp_free_i32(tmp3);
7663 tcg_temp_free_i32(tmp2);
6ddbc6e4 7664 store_reg(s, rd, tmp);
9ee6e8bb 7665 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7666 tmp = load_reg(s, rm);
9ee6e8bb 7667 shift = (insn >> 10) & 3;
1301f322 7668 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7669 rotate, a shift is sufficient. */
7670 if (shift != 0)
f669df27 7671 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7672 op1 = (insn >> 20) & 7;
7673 switch (op1) {
5e3f878a
PB
7674 case 0: gen_sxtb16(tmp); break;
7675 case 2: gen_sxtb(tmp); break;
7676 case 3: gen_sxth(tmp); break;
7677 case 4: gen_uxtb16(tmp); break;
7678 case 6: gen_uxtb(tmp); break;
7679 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7680 default: goto illegal_op;
7681 }
7682 if (rn != 15) {
5e3f878a 7683 tmp2 = load_reg(s, rn);
9ee6e8bb 7684 if ((op1 & 3) == 0) {
5e3f878a 7685 gen_add16(tmp, tmp2);
9ee6e8bb 7686 } else {
5e3f878a 7687 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7688 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7689 }
7690 }
6c95676b 7691 store_reg(s, rd, tmp);
9ee6e8bb
PB
7692 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7693 /* rev */
b0109805 7694 tmp = load_reg(s, rm);
9ee6e8bb
PB
7695 if (insn & (1 << 22)) {
7696 if (insn & (1 << 7)) {
b0109805 7697 gen_revsh(tmp);
9ee6e8bb
PB
7698 } else {
7699 ARCH(6T2);
b0109805 7700 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7701 }
7702 } else {
7703 if (insn & (1 << 7))
b0109805 7704 gen_rev16(tmp);
9ee6e8bb 7705 else
66896cb8 7706 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7707 }
b0109805 7708 store_reg(s, rd, tmp);
9ee6e8bb
PB
7709 } else {
7710 goto illegal_op;
7711 }
7712 break;
7713 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7714 switch ((insn >> 20) & 0x7) {
7715 case 5:
7716 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7717 /* op2 not 00x or 11x : UNDEF */
7718 goto illegal_op;
7719 }
838fa72d
AJ
7720 /* Signed multiply most significant [accumulate].
7721 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7722 tmp = load_reg(s, rm);
7723 tmp2 = load_reg(s, rs);
a7812ae4 7724 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7725
955a7dd5 7726 if (rd != 15) {
838fa72d 7727 tmp = load_reg(s, rd);
9ee6e8bb 7728 if (insn & (1 << 6)) {
838fa72d 7729 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7730 } else {
838fa72d 7731 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7732 }
7733 }
838fa72d
AJ
7734 if (insn & (1 << 5)) {
7735 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7736 }
7737 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7738 tmp = tcg_temp_new_i32();
838fa72d
AJ
7739 tcg_gen_trunc_i64_i32(tmp, tmp64);
7740 tcg_temp_free_i64(tmp64);
955a7dd5 7741 store_reg(s, rn, tmp);
41e9564d
PM
7742 break;
7743 case 0:
7744 case 4:
7745 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7746 if (insn & (1 << 7)) {
7747 goto illegal_op;
7748 }
7749 tmp = load_reg(s, rm);
7750 tmp2 = load_reg(s, rs);
9ee6e8bb 7751 if (insn & (1 << 5))
5e3f878a
PB
7752 gen_swap_half(tmp2);
7753 gen_smul_dual(tmp, tmp2);
5e3f878a 7754 if (insn & (1 << 6)) {
e1d177b9 7755 /* This subtraction cannot overflow. */
5e3f878a
PB
7756 tcg_gen_sub_i32(tmp, tmp, tmp2);
7757 } else {
e1d177b9
PM
7758 /* This addition cannot overflow 32 bits;
7759 * however it may overflow considered as a signed
7760 * operation, in which case we must set the Q flag.
7761 */
9ef39277 7762 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7763 }
7d1b0095 7764 tcg_temp_free_i32(tmp2);
9ee6e8bb 7765 if (insn & (1 << 22)) {
5e3f878a 7766 /* smlald, smlsld */
a7812ae4
PB
7767 tmp64 = tcg_temp_new_i64();
7768 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7769 tcg_temp_free_i32(tmp);
a7812ae4
PB
7770 gen_addq(s, tmp64, rd, rn);
7771 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7772 tcg_temp_free_i64(tmp64);
9ee6e8bb 7773 } else {
5e3f878a 7774 /* smuad, smusd, smlad, smlsd */
22478e79 7775 if (rd != 15)
9ee6e8bb 7776 {
22478e79 7777 tmp2 = load_reg(s, rd);
9ef39277 7778 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7779 tcg_temp_free_i32(tmp2);
9ee6e8bb 7780 }
22478e79 7781 store_reg(s, rn, tmp);
9ee6e8bb 7782 }
41e9564d 7783 break;
b8b8ea05
PM
7784 case 1:
7785 case 3:
7786 /* SDIV, UDIV */
7787 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7788 goto illegal_op;
7789 }
7790 if (((insn >> 5) & 7) || (rd != 15)) {
7791 goto illegal_op;
7792 }
7793 tmp = load_reg(s, rm);
7794 tmp2 = load_reg(s, rs);
7795 if (insn & (1 << 21)) {
7796 gen_helper_udiv(tmp, tmp, tmp2);
7797 } else {
7798 gen_helper_sdiv(tmp, tmp, tmp2);
7799 }
7800 tcg_temp_free_i32(tmp2);
7801 store_reg(s, rn, tmp);
7802 break;
41e9564d
PM
7803 default:
7804 goto illegal_op;
9ee6e8bb
PB
7805 }
7806 break;
7807 case 3:
7808 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7809 switch (op1) {
7810 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7811 ARCH(6);
7812 tmp = load_reg(s, rm);
7813 tmp2 = load_reg(s, rs);
7814 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7815 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7816 if (rd != 15) {
7817 tmp2 = load_reg(s, rd);
6ddbc6e4 7818 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7819 tcg_temp_free_i32(tmp2);
9ee6e8bb 7820 }
ded9d295 7821 store_reg(s, rn, tmp);
9ee6e8bb
PB
7822 break;
7823 case 0x20: case 0x24: case 0x28: case 0x2c:
7824 /* Bitfield insert/clear. */
7825 ARCH(6T2);
7826 shift = (insn >> 7) & 0x1f;
7827 i = (insn >> 16) & 0x1f;
7828 i = i + 1 - shift;
7829 if (rm == 15) {
7d1b0095 7830 tmp = tcg_temp_new_i32();
5e3f878a 7831 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7832 } else {
5e3f878a 7833 tmp = load_reg(s, rm);
9ee6e8bb
PB
7834 }
7835 if (i != 32) {
5e3f878a 7836 tmp2 = load_reg(s, rd);
d593c48e 7837 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7838 tcg_temp_free_i32(tmp2);
9ee6e8bb 7839 }
5e3f878a 7840 store_reg(s, rd, tmp);
9ee6e8bb
PB
7841 break;
7842 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7843 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7844 ARCH(6T2);
5e3f878a 7845 tmp = load_reg(s, rm);
9ee6e8bb
PB
7846 shift = (insn >> 7) & 0x1f;
7847 i = ((insn >> 16) & 0x1f) + 1;
7848 if (shift + i > 32)
7849 goto illegal_op;
7850 if (i < 32) {
7851 if (op1 & 0x20) {
5e3f878a 7852 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7853 } else {
5e3f878a 7854 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7855 }
7856 }
5e3f878a 7857 store_reg(s, rd, tmp);
9ee6e8bb
PB
7858 break;
7859 default:
7860 goto illegal_op;
7861 }
7862 break;
7863 }
7864 break;
7865 }
7866 do_ldst:
7867 /* Check for undefined extension instructions
7868 * per the ARM Bible IE:
7869 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7870 */
7871 sh = (0xf << 20) | (0xf << 4);
7872 if (op1 == 0x7 && ((insn & sh) == sh))
7873 {
7874 goto illegal_op;
7875 }
7876 /* load/store byte/word */
7877 rn = (insn >> 16) & 0xf;
7878 rd = (insn >> 12) & 0xf;
b0109805 7879 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7880 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7881 if (insn & (1 << 24))
b0109805 7882 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7883 if (insn & (1 << 20)) {
7884 /* load */
5a839c0d 7885 tmp = tcg_temp_new_i32();
9ee6e8bb 7886 if (insn & (1 << 22)) {
08307563 7887 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 7888 } else {
08307563 7889 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 7890 }
9ee6e8bb
PB
7891 } else {
7892 /* store */
b0109805 7893 tmp = load_reg(s, rd);
5a839c0d 7894 if (insn & (1 << 22)) {
08307563 7895 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 7896 } else {
08307563 7897 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
7898 }
7899 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7900 }
7901 if (!(insn & (1 << 24))) {
b0109805
PB
7902 gen_add_data_offset(s, insn, tmp2);
7903 store_reg(s, rn, tmp2);
7904 } else if (insn & (1 << 21)) {
7905 store_reg(s, rn, tmp2);
7906 } else {
7d1b0095 7907 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7908 }
7909 if (insn & (1 << 20)) {
7910 /* Complete the load. */
be5e7a76 7911 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7912 }
7913 break;
7914 case 0x08:
7915 case 0x09:
7916 {
7917 int j, n, user, loaded_base;
39d5492a 7918 TCGv_i32 loaded_var;
9ee6e8bb
PB
7919 /* load/store multiple words */
7920 /* XXX: store correct base if write back */
7921 user = 0;
7922 if (insn & (1 << 22)) {
7923 if (IS_USER(s))
7924 goto illegal_op; /* only usable in supervisor mode */
7925
7926 if ((insn & (1 << 15)) == 0)
7927 user = 1;
7928 }
7929 rn = (insn >> 16) & 0xf;
b0109805 7930 addr = load_reg(s, rn);
9ee6e8bb
PB
7931
7932 /* compute total size */
7933 loaded_base = 0;
39d5492a 7934 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
7935 n = 0;
7936 for(i=0;i<16;i++) {
7937 if (insn & (1 << i))
7938 n++;
7939 }
7940 /* XXX: test invalid n == 0 case ? */
7941 if (insn & (1 << 23)) {
7942 if (insn & (1 << 24)) {
7943 /* pre increment */
b0109805 7944 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7945 } else {
7946 /* post increment */
7947 }
7948 } else {
7949 if (insn & (1 << 24)) {
7950 /* pre decrement */
b0109805 7951 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7952 } else {
7953 /* post decrement */
7954 if (n != 1)
b0109805 7955 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7956 }
7957 }
7958 j = 0;
7959 for(i=0;i<16;i++) {
7960 if (insn & (1 << i)) {
7961 if (insn & (1 << 20)) {
7962 /* load */
5a839c0d 7963 tmp = tcg_temp_new_i32();
08307563 7964 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 7965 if (user) {
b75263d6 7966 tmp2 = tcg_const_i32(i);
1ce94f81 7967 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7968 tcg_temp_free_i32(tmp2);
7d1b0095 7969 tcg_temp_free_i32(tmp);
9ee6e8bb 7970 } else if (i == rn) {
b0109805 7971 loaded_var = tmp;
9ee6e8bb
PB
7972 loaded_base = 1;
7973 } else {
be5e7a76 7974 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7975 }
7976 } else {
7977 /* store */
7978 if (i == 15) {
7979 /* special case: r15 = PC + 8 */
7980 val = (long)s->pc + 4;
7d1b0095 7981 tmp = tcg_temp_new_i32();
b0109805 7982 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7983 } else if (user) {
7d1b0095 7984 tmp = tcg_temp_new_i32();
b75263d6 7985 tmp2 = tcg_const_i32(i);
9ef39277 7986 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7987 tcg_temp_free_i32(tmp2);
9ee6e8bb 7988 } else {
b0109805 7989 tmp = load_reg(s, i);
9ee6e8bb 7990 }
08307563 7991 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7992 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7993 }
7994 j++;
7995 /* no need to add after the last transfer */
7996 if (j != n)
b0109805 7997 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7998 }
7999 }
8000 if (insn & (1 << 21)) {
8001 /* write back */
8002 if (insn & (1 << 23)) {
8003 if (insn & (1 << 24)) {
8004 /* pre increment */
8005 } else {
8006 /* post increment */
b0109805 8007 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8008 }
8009 } else {
8010 if (insn & (1 << 24)) {
8011 /* pre decrement */
8012 if (n != 1)
b0109805 8013 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8014 } else {
8015 /* post decrement */
b0109805 8016 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8017 }
8018 }
b0109805
PB
8019 store_reg(s, rn, addr);
8020 } else {
7d1b0095 8021 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8022 }
8023 if (loaded_base) {
b0109805 8024 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8025 }
8026 if ((insn & (1 << 22)) && !user) {
8027 /* Restore CPSR from SPSR. */
d9ba4830
PB
8028 tmp = load_cpu_field(spsr);
8029 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8030 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8031 s->is_jmp = DISAS_UPDATE;
8032 }
8033 }
8034 break;
8035 case 0xa:
8036 case 0xb:
8037 {
8038 int32_t offset;
8039
8040 /* branch (and link) */
8041 val = (int32_t)s->pc;
8042 if (insn & (1 << 24)) {
7d1b0095 8043 tmp = tcg_temp_new_i32();
5e3f878a
PB
8044 tcg_gen_movi_i32(tmp, val);
8045 store_reg(s, 14, tmp);
9ee6e8bb 8046 }
534df156
PM
8047 offset = sextract32(insn << 2, 0, 26);
8048 val += offset + 4;
9ee6e8bb
PB
8049 gen_jmp(s, val);
8050 }
8051 break;
8052 case 0xc:
8053 case 0xd:
8054 case 0xe:
8055 /* Coprocessor. */
8056 if (disas_coproc_insn(env, s, insn))
8057 goto illegal_op;
8058 break;
8059 case 0xf:
8060 /* swi */
5e3f878a 8061 gen_set_pc_im(s->pc);
9ee6e8bb
PB
8062 s->is_jmp = DISAS_SWI;
8063 break;
8064 default:
8065 illegal_op:
bc4a0de0 8066 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
8067 break;
8068 }
8069 }
8070}
8071
/* Predicate: ops 0..7 are the Thumb-2 logical (flag-setting via
 * gen_logic_CC) operations; 8 and above are arithmetic. Returns
 * nonzero for a logical op, zero otherwise. */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
8078
8079/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8080 then set condition code flags based on the result of the operation.
8081 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8082 to the high bit of T1.
8083 Returns zero if the opcode is valid. */
8084
8085static int
39d5492a
PM
8086gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8087 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8088{
8089 int logic_cc;
8090
8091 logic_cc = 0;
8092 switch (op) {
8093 case 0: /* and */
396e467c 8094 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8095 logic_cc = conds;
8096 break;
8097 case 1: /* bic */
f669df27 8098 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8099 logic_cc = conds;
8100 break;
8101 case 2: /* orr */
396e467c 8102 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8103 logic_cc = conds;
8104 break;
8105 case 3: /* orn */
29501f1b 8106 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8107 logic_cc = conds;
8108 break;
8109 case 4: /* eor */
396e467c 8110 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8111 logic_cc = conds;
8112 break;
8113 case 8: /* add */
8114 if (conds)
72485ec4 8115 gen_add_CC(t0, t0, t1);
9ee6e8bb 8116 else
396e467c 8117 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8118 break;
8119 case 10: /* adc */
8120 if (conds)
49b4c31e 8121 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8122 else
396e467c 8123 gen_adc(t0, t1);
9ee6e8bb
PB
8124 break;
8125 case 11: /* sbc */
2de68a49
RH
8126 if (conds) {
8127 gen_sbc_CC(t0, t0, t1);
8128 } else {
396e467c 8129 gen_sub_carry(t0, t0, t1);
2de68a49 8130 }
9ee6e8bb
PB
8131 break;
8132 case 13: /* sub */
8133 if (conds)
72485ec4 8134 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8135 else
396e467c 8136 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8137 break;
8138 case 14: /* rsb */
8139 if (conds)
72485ec4 8140 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8141 else
396e467c 8142 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8143 break;
8144 default: /* 5, 6, 7, 9, 12, 15. */
8145 return 1;
8146 }
8147 if (logic_cc) {
396e467c 8148 gen_logic_CC(t0);
9ee6e8bb 8149 if (shifter_out)
396e467c 8150 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8151 }
8152 return 0;
8153}
8154
/* Translate a 32-bit thumb instruction.  Returns nonzero if the instruction
   is not legal.

   insn_hw1 is the first (already-fetched) halfword; the second halfword is
   read from s->pc here and combined into a full 32-bit encoding.  On cores
   without Thumb-2 or M-profile support, only the bl/blx second-half
   encodings are decoded (as the second of a pair of 16-bit instructions).  */
static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
    uint32_t insn, imm, shift, offset;
    uint32_t rd, rn, rm, rs;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 tmp3;
    TCGv_i32 addr;
    TCGv_i64 tmp64;
    int op;
    int shiftop;
    int conds;
    int logic_cc;

    if (!(arm_feature(env, ARM_FEATURE_THUMB2)
          || arm_feature (env, ARM_FEATURE_M))) {
        /* Thumb-1 cores may need to treat bl and blx as a pair of
           16-bit instructions to get correct prefetch abort behavior.  */
        insn = insn_hw1;
        if ((insn & (1 << 12)) == 0) {
            ARCH(5);
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);
            /* blx targets ARM state: force a word-aligned destination.  */
            tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);

            tmp2 = tcg_temp_new_i32();
            /* Link register gets the return address with the Thumb bit set.  */
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            tmp = load_reg(s, 14);
            tcg_gen_addi_i32(tmp, tmp, offset);

            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, s->pc | 1);
            store_reg(s, 14, tmp2);
            gen_bx(s, tmp);
            return 0;
        }
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               prefetch abort.  */
            offset = ((int32_t)insn << 21) >> 9;
            /* First half of bl/blx: stash the partial target in r14.  */
            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
            return 0;
        }
        /* Fall through to 32-bit decode.  */
    }

    /* Fetch the second halfword and combine with insn_hw1 into the full
       32-bit encoding (hw1 in the high half).  */
    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;
    insn |= (uint32_t)insn_hw1 << 16;

    /* Everything except the bl/blx encodings requires Thumb-2.  */
    if ((insn & 0xf800e800) != 0xf000e800) {
        ARCH(6T2);
    }

    /* Common register field extraction; individual cases reuse or
       re-extract these as their encodings require.  */
    rn = (insn >> 16) & 0xf;
    rs = (insn >> 12) & 0xf;
    rd = (insn >> 8) & 0xf;
    rm = insn & 0xf;
    /* Top-level decode on insn bits [28:25].  */
    switch ((insn >> 25) & 0xf) {
    case 0: case 1: case 2: case 3:
        /* 16-bit instructions.  Should never happen.  */
        abort();
    case 4:
        if (insn & (1 << 22)) {
            /* Other load/store, table branch.  */
            if (insn & 0x01200000) {
                /* Load/store doubleword.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    /* PC-relative: base is the word-aligned PC.  */
                    tcg_gen_movi_i32(addr, s->pc & ~3);
                } else {
                    addr = load_reg(s, rn);
                }
                offset = (insn & 0xff) * 4;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (insn & (1 << 24)) {
                    /* Pre-indexed: apply the offset now, leave 0 for
                       the writeback adjustment below.  */
                    tcg_gen_addi_i32(addr, addr, offset);
                    offset = 0;
                }
                if (insn & (1 << 20)) {
                    /* ldrd */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    store_reg(s, rs, tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    store_reg(s, rd, tmp);
                } else {
                    /* strd */
                    tmp = load_reg(s, rs);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp = load_reg(s, rd);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                }
                if (insn & (1 << 21)) {
                    /* Base writeback.  */
                    if (rn == 15)
                        goto illegal_op;
                    /* addr was advanced by 4 past the second word above;
                       undo that and apply any remaining post-index offset.  */
                    tcg_gen_addi_i32(addr, addr, offset - 4);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            } else if ((insn & (1 << 23)) == 0) {
                /* Load/store exclusive word.  */
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
                if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, 15, addr, 2);
                } else {
                    gen_store_exclusive(s, rd, rs, 15, addr, 2);
                }
                tcg_temp_free_i32(addr);
            } else if ((insn & (7 << 5)) == 0) {
                /* Table Branch.  */
                if (rn == 15) {
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc);
                } else {
                    addr = load_reg(s, rn);
                }
                tmp = load_reg(s, rm);
                tcg_gen_add_i32(addr, addr, tmp);
                if (insn & (1 << 4)) {
                    /* tbh: halfword table, so the index is added twice.  */
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld16u(tmp, addr, IS_USER(s));
                } else { /* tbb */
                    tcg_temp_free_i32(tmp);
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld8u(tmp, addr, IS_USER(s));
                }
                tcg_temp_free_i32(addr);
                /* Branch target is PC + 2 * table entry.  */
                tcg_gen_shli_i32(tmp, tmp, 1);
                tcg_gen_addi_i32(tmp, tmp, s->pc);
                store_reg(s, 15, tmp);
            } else {
                int op2 = (insn >> 6) & 0x3;
                op = (insn >> 4) & 0x3;
                switch (op2) {
                case 0:
                    goto illegal_op;
                case 1:
                    /* Load/store exclusive byte/halfword/doubleword */
                    if (op == 2) {
                        goto illegal_op;
                    }
                    ARCH(7);
                    break;
                case 2:
                    /* Load-acquire/store-release */
                    if (op == 3) {
                        goto illegal_op;
                    }
                    /* Fall through */
                case 3:
                    /* Load-acquire/store-release exclusive */
                    ARCH(8);
                    break;
                }
                addr = tcg_temp_local_new_i32();
                load_reg_var(s, addr, rn);
                if (!(op2 & 1)) {
                    /* Non-exclusive load-acquire/store-release.  */
                    if (insn & (1 << 20)) {
                        tmp = tcg_temp_new_i32();
                        switch (op) {
                        case 0: /* ldab */
                            gen_aa32_ld8u(tmp, addr, IS_USER(s));
                            break;
                        case 1: /* ldah */
                            gen_aa32_ld16u(tmp, addr, IS_USER(s));
                            break;
                        case 2: /* lda */
                            gen_aa32_ld32u(tmp, addr, IS_USER(s));
                            break;
                        default:
                            abort();
                        }
                        store_reg(s, rs, tmp);
                    } else {
                        tmp = load_reg(s, rs);
                        switch (op) {
                        case 0: /* stlb */
                            gen_aa32_st8(tmp, addr, IS_USER(s));
                            break;
                        case 1: /* stlh */
                            gen_aa32_st16(tmp, addr, IS_USER(s));
                            break;
                        case 2: /* stl */
                            gen_aa32_st32(tmp, addr, IS_USER(s));
                            break;
                        default:
                            abort();
                        }
                        tcg_temp_free_i32(tmp);
                    }
                } else if (insn & (1 << 20)) {
                    gen_load_exclusive(s, rs, rd, addr, op);
                } else {
                    gen_store_exclusive(s, rm, rs, rd, addr, op);
                }
                tcg_temp_free_i32(addr);
            }
        } else {
            /* Load/store multiple, RFE, SRS.  */
            if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
                /* RFE, SRS: not available in user mode or on M profile */
                if (IS_USER(s) || IS_M(env)) {
                    goto illegal_op;
                }
                if (insn & (1 << 20)) {
                    /* rfe */
                    addr = load_reg(s, rn);
                    if ((insn & (1 << 24)) == 0)
                        tcg_gen_addi_i32(addr, addr, -8);
                    /* Load PC into tmp and CPSR into tmp2.  */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, 0);
                    tcg_gen_addi_i32(addr, addr, 4);
                    tmp2 = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp2, addr, 0);
                    if (insn & (1 << 21)) {
                        /* Base writeback.  */
                        if (insn & (1 << 24)) {
                            tcg_gen_addi_i32(addr, addr, 4);
                        } else {
                            tcg_gen_addi_i32(addr, addr, -4);
                        }
                        store_reg(s, rn, addr);
                    } else {
                        tcg_temp_free_i32(addr);
                    }
                    gen_rfe(s, tmp, tmp2);
                } else {
                    /* srs */
                    gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
                            insn & (1 << 21));
                }
            } else {
                int i, loaded_base = 0;
                TCGv_i32 loaded_var;
                /* Load/store multiple.  */
                addr = load_reg(s, rn);
                /* Total transfer size: 4 bytes per set bit in the
                   register list.  */
                offset = 0;
                for (i = 0; i < 16; i++) {
                    if (insn & (1 << i))
                        offset += 4;
                }
                if (insn & (1 << 24)) {
                    /* Decrement-before: start at the bottom of the block.  */
                    tcg_gen_addi_i32(addr, addr, -offset);
                }

                /* If the base register is in the load list, defer its
                   update until after all memory accesses (loaded_var).  */
                TCGV_UNUSED_I32(loaded_var);
                for (i = 0; i < 16; i++) {
                    if ((insn & (1 << i)) == 0)
                        continue;
                    if (insn & (1 << 20)) {
                        /* Load.  */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        if (i == 15) {
                            gen_bx(s, tmp);
                        } else if (i == rn) {
                            loaded_var = tmp;
                            loaded_base = 1;
                        } else {
                            store_reg(s, i, tmp);
                        }
                    } else {
                        /* Store.  */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                    }
                    tcg_gen_addi_i32(addr, addr, 4);
                }
                if (loaded_base) {
                    store_reg(s, rn, loaded_var);
                }
                if (insn & (1 << 21)) {
                    /* Base register writeback.  */
                    if (insn & (1 << 24)) {
                        tcg_gen_addi_i32(addr, addr, -offset);
                    }
                    /* Fault if writeback register is in register list.  */
                    if (insn & (1 << rn))
                        goto illegal_op;
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    case 5:

        op = (insn >> 21) & 0xf;
        if (op == 6) {
            /* Halfword pack.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
            if (insn & (1 << 5)) {
                /* pkhtb */
                if (shift == 0)
                    shift = 31;
                tcg_gen_sari_i32(tmp2, tmp2, shift);
                tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
                tcg_gen_ext16u_i32(tmp2, tmp2);
            } else {
                /* pkhbt */
                if (shift)
                    tcg_gen_shli_i32(tmp2, tmp2, shift);
                tcg_gen_ext16u_i32(tmp, tmp);
                tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
            }
            tcg_gen_or_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* Data processing register constant shift.  */
            if (rn == 15) {
                /* rn == 15 encodes an operand of zero, not the PC.  */
                tmp = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp, 0);
            } else {
                tmp = load_reg(s, rn);
            }
            tmp2 = load_reg(s, rm);

            shiftop = (insn >> 4) & 3;
            shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
            conds = (insn & (1 << 20)) != 0;
            logic_cc = (conds && thumb2_logic_op(op));
            gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
            if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
                goto illegal_op;
            tcg_temp_free_i32(tmp2);
            if (rd != 15) {
                store_reg(s, rd, tmp);
            } else {
                /* rd == 15: compare-type ops discard the result.  */
                tcg_temp_free_i32(tmp);
            }
        }
        break;
    case 13: /* Misc data processing.  */
        op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
        if (op < 4 && (insn & 0xf000) != 0xf000)
            goto illegal_op;
        switch (op) {
        case 0: /* Register controlled shift.  */
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((insn & 0x70) != 0)
                goto illegal_op;
            op = (insn >> 21) & 3;
            logic_cc = (insn & (1 << 20)) != 0;
            gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
            if (logic_cc)
                gen_logic_CC(tmp);
            store_reg_bx(env, s, rd, tmp);
            break;
        case 1: /* Sign/zero extend.  */
            tmp = load_reg(s, rm);
            shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
               rotate, a shift is sufficient.  */
            if (shift != 0)
                tcg_gen_rotri_i32(tmp, tmp, shift * 8);
            op = (insn >> 20) & 7;
            switch (op) {
            case 0: gen_sxth(tmp);   break;
            case 1: gen_uxth(tmp);   break;
            case 2: gen_sxtb16(tmp); break;
            case 3: gen_uxtb16(tmp); break;
            case 4: gen_sxtb(tmp);   break;
            case 5: gen_uxtb(tmp);   break;
            default: goto illegal_op;
            }
            if (rn != 15) {
                /* sxtab/uxtab family: accumulate into rn.  */
                tmp2 = load_reg(s, rn);
                if ((op >> 1) == 1) {
                    gen_add16(tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 2: /* SIMD add/subtract.  */
            op = (insn >> 20) & 7;
            shift = (insn >> 4) & 7;
            if ((op & 3) == 3 || (shift & 3) == 3)
                goto illegal_op;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
            break;
        case 3: /* Other data processing.  */
            op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
            if (op < 4) {
                /* Saturating add/subtract.  */
                tmp = load_reg(s, rn);
                tmp2 = load_reg(s, rm);
                if (op & 1)
                    gen_helper_double_saturate(tmp, cpu_env, tmp);
                if (op & 2)
                    gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
                else
                    gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, rn);
                switch (op) {
                case 0x0a: /* rbit */
                    gen_helper_rbit(tmp, tmp);
                    break;
                case 0x08: /* rev */
                    tcg_gen_bswap32_i32(tmp, tmp);
                    break;
                case 0x09: /* rev16 */
                    gen_rev16(tmp);
                    break;
                case 0x0b: /* revsh */
                    gen_revsh(tmp);
                    break;
                case 0x10: /* sel */
                    tmp2 = load_reg(s, rm);
                    tmp3 = tcg_temp_new_i32();
                    tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
                    gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
                    tcg_temp_free_i32(tmp3);
                    tcg_temp_free_i32(tmp2);
                    break;
                case 0x18: /* clz */
                    gen_helper_clz(tmp, tmp);
                    break;
                default:
                    goto illegal_op;
                }
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: /* 32-bit multiply.  Sum of absolute differences.  */
            op = (insn >> 4) & 0xf;
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            switch ((insn >> 20) & 7) {
            case 0: /* 32 x 32 -> 32 */
                tcg_gen_mul_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    /* mla/mls: accumulate from rs.  */
                    tmp2 = load_reg(s, rs);
                    if (op)
                        tcg_gen_sub_i32(tmp, tmp2, tmp);
                    else
                        tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 1: /* 16 x 16 -> 32 */
                gen_mulxy(tmp, tmp2, op & 2, op & 1);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            case 2: /* Dual multiply add.  */
            case 4: /* Dual multiply subtract.  */
                if (op)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (insn & (1 << 22)) {
                    /* This subtraction cannot overflow. */
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    /* This addition cannot overflow 32 bits;
                     * however it may overflow considered as a signed
                     * operation, in which case we must set the Q flag.
                     */
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                if (rs != 15)
                  {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                  }
                break;
            case 3: /* 32 * 16 -> 32msb */
                if (op)
                    tcg_gen_sari_i32(tmp2, tmp2, 16);
                else
                    gen_sxth(tmp2);
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                tcg_gen_shri_i64(tmp64, tmp64, 16);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                if (rs != 15)
                  {
                    tmp2 = load_reg(s, rs);
                    gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                  }
                break;
            case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
                tmp64 = gen_muls_i64_i32(tmp, tmp2);
                if (rs != 15) {
                    tmp = load_reg(s, rs);
                    if (insn & (1 << 20)) {
                        tmp64 = gen_addq_msw(tmp64, tmp);
                    } else {
                        tmp64 = gen_subq_msw(tmp64, tmp);
                    }
                }
                if (insn & (1 << 4)) {
                    /* Rounding variant: add 0.5ulp of the high word.  */
                    tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
                }
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                break;
            case 7: /* Unsigned sum of absolute differences.  */
                gen_helper_usad8(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                if (rs != 15) {
                    tmp2 = load_reg(s, rs);
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                    tcg_temp_free_i32(tmp2);
                }
                break;
            }
            store_reg(s, rd, tmp);
            break;
        case 6: case 7: /* 64-bit multiply, Divide.  */
            op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
            tmp = load_reg(s, rn);
            tmp2 = load_reg(s, rm);
            if ((op & 0x50) == 0x10) {
                /* sdiv, udiv */
                if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
                    goto illegal_op;
                }
                if (op & 0x20)
                    gen_helper_udiv(tmp, tmp, tmp2);
                else
                    gen_helper_sdiv(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
            } else if ((op & 0xe) == 0xc) {
                /* Dual multiply accumulate long.  */
                if (op & 1)
                    gen_swap_half(tmp2);
                gen_smul_dual(tmp, tmp2);
                if (op & 0x10) {
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                } else {
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                }
                tcg_temp_free_i32(tmp2);
                /* BUGFIX */
                tmp64 = tcg_temp_new_i64();
                tcg_gen_ext_i32_i64(tmp64, tmp);
                tcg_temp_free_i32(tmp);
                gen_addq(s, tmp64, rs, rd);
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            } else {
                if (op & 0x20) {
                    /* Unsigned 64-bit multiply  */
                    tmp64 = gen_mulu_i64_i32(tmp, tmp2);
                } else {
                    if (op & 8) {
                        /* smlalxy */
                        gen_mulxy(tmp, tmp2, op & 2, op & 1);
                        tcg_temp_free_i32(tmp2);
                        tmp64 = tcg_temp_new_i64();
                        tcg_gen_ext_i32_i64(tmp64, tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        /* Signed 64-bit multiply  */
                        tmp64 = gen_muls_i64_i32(tmp, tmp2);
                    }
                }
                if (op & 4) {
                    /* umaal */
                    gen_addq_lo(s, tmp64, rs);
                    gen_addq_lo(s, tmp64, rd);
                } else if (op & 0x40) {
                    /* 64-bit accumulate.  */
                    gen_addq(s, tmp64, rs, rd);
                }
                gen_storeq_reg(s, rs, rd, tmp64);
                tcg_temp_free_i64(tmp64);
            }
            break;
        }
        break;
    case 6: case 7: case 14: case 15:
        /* Coprocessor.  */
        if (((insn >> 24) & 3) == 3) {
            /* Translate into the equivalent ARM encoding.  */
            insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
            if (disas_neon_data_insn(env, s, insn))
                goto illegal_op;
        } else {
            if (insn & (1 << 28))
                goto illegal_op;
            if (disas_coproc_insn (env, s, insn))
                goto illegal_op;
        }
        break;
    case 8: case 9: case 10: case 11:
        if (insn & (1 << 15)) {
            /* Branches, misc control.  */
            if (insn & 0x5000) {
                /* Unconditional branch.  */
                /* signextend(hw1[10:0]) -> offset[:12].  */
                offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
                /* hw1[10:0] -> offset[11:1].  */
                offset |= (insn & 0x7ff) << 1;
                /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
                   offset[24:22] already have the same value because of the
                   sign extension above.  */
                offset ^= ((~insn) & (1 << 13)) << 10;
                offset ^= ((~insn) & (1 << 11)) << 11;

                if (insn & (1 << 14)) {
                    /* Branch and link.  */
                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                }

                offset += s->pc;
                if (insn & (1 << 12)) {
                    /* b/bl */
                    gen_jmp(s, offset);
                } else {
                    /* blx */
                    offset &= ~(uint32_t)2;
                    /* thumb2 bx, no need to check */
                    gen_bx_im(s, offset);
                }
            } else if (((insn >> 23) & 7) == 7) {
                /* Misc control */
                if (insn & (1 << 13))
                    goto illegal_op;

                if (insn & (1 << 26)) {
                    /* Secure monitor call (v6Z) */
                    qemu_log_mask(LOG_UNIMP,
                                  "arm: unimplemented secure monitor call\n");
                    goto illegal_op; /* not implemented.  */
                } else {
                    op = (insn >> 20) & 7;
                    switch (op) {
                    case 0: /* msr cpsr.  */
                        if (IS_M(env)) {
                            /* M profile: special register write via helper.  */
                            tmp = load_reg(s, rn);
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_msr(cpu_env, addr, tmp);
                            tcg_temp_free_i32(addr);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        }
                        /* fall through */
                    case 1: /* msr spsr.  */
                        if (IS_M(env))
                            goto illegal_op;
                        tmp = load_reg(s, rn);
                        if (gen_set_psr(s,
                                msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
                                op == 1, tmp))
                            goto illegal_op;
                        break;
                    case 2: /* cps, nop-hint.  */
                        if (((insn >> 8) & 7) == 0) {
                            gen_nop_hint(s, insn & 0xff);
                        }
                        /* Implemented as NOP in user mode.  */
                        if (IS_USER(s))
                            break;
                        offset = 0;
                        imm = 0;
                        if (insn & (1 << 10)) {
                            /* cpsie/cpsid: build the mask of A/I/F bits
                               to change (offset) and their new values (imm).  */
                            if (insn & (1 << 7))
                                offset |= CPSR_A;
                            if (insn & (1 << 6))
                                offset |= CPSR_I;
                            if (insn & (1 << 5))
                                offset |= CPSR_F;
                            if (insn & (1 << 9))
                                imm = CPSR_A | CPSR_I | CPSR_F;
                        }
                        if (insn & (1 << 8)) {
                            /* Mode change.  */
                            offset |= 0x1f;
                            imm |= (insn & 0x1f);
                        }
                        if (offset) {
                            gen_set_psr_im(s, offset, 0, imm);
                        }
                        break;
                    case 3: /* Special control operations.  */
                        ARCH(7);
                        op = (insn >> 4) & 0xf;
                        switch (op) {
                        case 2: /* clrex */
                            gen_clrex(s);
                            break;
                        case 4: /* dsb */
                        case 5: /* dmb */
                        case 6: /* isb */
                            /* These execute as NOPs.  */
                            break;
                        default:
                            goto illegal_op;
                        }
                        break;
                    case 4: /* bxj */
                        /* Trivial implementation equivalent to bx.  */
                        tmp = load_reg(s, rn);
                        gen_bx(s, tmp);
                        break;
                    case 5: /* Exception return.  */
                        if (IS_USER(s)) {
                            goto illegal_op;
                        }
                        if (rn != 14 || rd != 15) {
                            goto illegal_op;
                        }
                        tmp = load_reg(s, rn);
                        tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
                        gen_exception_return(s, tmp);
                        break;
                    case 6: /* mrs cpsr.  */
                        tmp = tcg_temp_new_i32();
                        if (IS_M(env)) {
                            addr = tcg_const_i32(insn & 0xff);
                            gen_helper_v7m_mrs(tmp, cpu_env, addr);
                            tcg_temp_free_i32(addr);
                        } else {
                            gen_helper_cpsr_read(tmp, cpu_env);
                        }
                        store_reg(s, rd, tmp);
                        break;
                    case 7: /* mrs spsr.  */
                        /* Not accessible in user mode.  */
                        if (IS_USER(s) || IS_M(env))
                            goto illegal_op;
                        tmp = load_cpu_field(spsr);
                        store_reg(s, rd, tmp);
                        break;
                    }
                }
            } else {
                /* Conditional branch.  */
                op = (insn >> 22) & 0xf;
                /* Generate a conditional jump to next instruction.  */
                s->condlabel = gen_new_label();
                gen_test_cc(op ^ 1, s->condlabel);
                s->condjmp = 1;

                /* offset[11:1] = insn[10:0] */
                offset = (insn & 0x7ff) << 1;
                /* offset[17:12] = insn[21:16].  */
                offset |= (insn & 0x003f0000) >> 4;
                /* offset[31:20] = insn[26].  */
                offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
                /* offset[18] = insn[13].  */
                offset |= (insn & (1 << 13)) << 5;
                /* offset[19] = insn[11].  */
                offset |= (insn & (1 << 11)) << 8;

                /* jump to the offset */
                gen_jmp(s, s->pc + offset);
            }
        } else {
            /* Data processing immediate.  */
            if (insn & (1 << 25)) {
                if (insn & (1 << 24)) {
                    if (insn & (1 << 20))
                        goto illegal_op;
                    /* Bitfield/Saturate.  */
                    op = (insn >> 21) & 7;
                    imm = insn & 0x1f;
                    shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
                    if (rn == 15) {
                        /* rn == 15 encodes a zero source operand.  */
                        tmp = tcg_temp_new_i32();
                        tcg_gen_movi_i32(tmp, 0);
                    } else {
                        tmp = load_reg(s, rn);
                    }
                    switch (op) {
                    case 2: /* Signed bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_sbfx(tmp, shift, imm);
                        break;
                    case 6: /* Unsigned bitfield extract.  */
                        imm++;
                        if (shift + imm > 32)
                            goto illegal_op;
                        if (imm < 32)
                            gen_ubfx(tmp, shift, (1u << imm) - 1);
                        break;
                    case 3: /* Bitfield insert/clear.  */
                        if (imm < shift)
                            goto illegal_op;
                        imm = imm + 1 - shift;
                        if (imm != 32) {
                            tmp2 = load_reg(s, rd);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
                            tcg_temp_free_i32(tmp2);
                        }
                        break;
                    case 7:
                        goto illegal_op;
                    default: /* Saturate.  */
                        if (shift) {
                            if (op & 1)
                                tcg_gen_sari_i32(tmp, tmp, shift);
                            else
                                tcg_gen_shli_i32(tmp, tmp, shift);
                        }
                        tmp2 = tcg_const_i32(imm);
                        if (op & 4) {
                            /* Unsigned.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
                            else
                                gen_helper_usat(tmp, cpu_env, tmp, tmp2);
                        } else {
                            /* Signed.  */
                            if ((op & 1) && shift == 0)
                                gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
                            else
                                gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
                        }
                        tcg_temp_free_i32(tmp2);
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    imm = ((insn & 0x04000000) >> 15)
                          | ((insn & 0x7000) >> 4) | (insn & 0xff);
                    if (insn & (1 << 22)) {
                        /* 16-bit immediate.  */
                        imm |= (insn >> 4) & 0xf000;
                        if (insn & (1 << 23)) {
                            /* movt */
                            tmp = load_reg(s, rd);
                            tcg_gen_ext16u_i32(tmp, tmp);
                            tcg_gen_ori_i32(tmp, tmp, imm << 16);
                        } else {
                            /* movw */
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, imm);
                        }
                    } else {
                        /* Add/sub 12-bit immediate.  */
                        if (rn == 15) {
                            /* adr: PC-relative, base word-aligned.  */
                            offset = s->pc & ~(uint32_t)3;
                            if (insn & (1 << 23))
                                offset -= imm;
                            else
                                offset += imm;
                            tmp = tcg_temp_new_i32();
                            tcg_gen_movi_i32(tmp, offset);
                        } else {
                            tmp = load_reg(s, rn);
                            if (insn & (1 << 23))
                                tcg_gen_subi_i32(tmp, tmp, imm);
                            else
                                tcg_gen_addi_i32(tmp, tmp, imm);
                        }
                    }
                    store_reg(s, rd, tmp);
                }
            } else {
                int shifter_out = 0;
                /* modified 12-bit immediate.  */
                shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
                imm = (insn & 0xff);
                switch (shift) {
                case 0: /* XY */
                    /* Nothing to do.  */
                    break;
                case 1: /* 00XY00XY */
                    imm |= imm << 16;
                    break;
                case 2: /* XY00XY00 */
                    imm |= imm << 16;
                    imm <<= 8;
                    break;
                case 3: /* XYXYXYXY */
                    imm |= imm << 16;
                    imm |= imm << 8;
                    break;
                default: /* Rotated constant.  */
                    shift = (shift << 1) | (imm >> 7);
                    imm |= 0x80;
                    imm = imm << (32 - shift);
                    /* Rotated constants feed bit 31 into the carry flag
                       for flag-setting logical ops.  */
                    shifter_out = 1;
                    break;
                }
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, imm);
                rn = (insn >> 16) & 0xf;
                if (rn == 15) {
                    tmp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp, 0);
                } else {
                    tmp = load_reg(s, rn);
                }
                op = (insn >> 21) & 0xf;
                if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
                                       shifter_out, tmp, tmp2))
                    goto illegal_op;
                tcg_temp_free_i32(tmp2);
                rd = (insn >> 8) & 0xf;
                if (rd != 15) {
                    store_reg(s, rd, tmp);
                } else {
                    tcg_temp_free_i32(tmp);
                }
            }
        }
        break;
    case 12: /* Load/store single data item.  */
        {
        int postinc = 0;
        int writeback = 0;
        int user;
        if ((insn & 0x01100000) == 0x01000000) {
            if (disas_neon_ls_insn(env, s, insn))
                goto illegal_op;
            break;
        }
        op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
        if (rs == 15) {
            if (!(insn & (1 << 20))) {
                goto illegal_op;
            }
            if (op != 2) {
                /* Byte or halfword load space with dest == r15 : memory hints.
                 * Catch them early so we don't emit pointless addressing code.
                 * This space is a mix of:
                 *  PLD/PLDW/PLI,  which we implement as NOPs (note that unlike
                 *     the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
                 *     cores)
                 *  unallocated hints, which must be treated as NOPs
                 *  UNPREDICTABLE space, which we NOP or UNDEF depending on
                 *     which is easiest for the decoding logic
                 *  Some space which must UNDEF
                 */
                int op1 = (insn >> 23) & 3;
                int op2 = (insn >> 6) & 0x3f;
                if (op & 2) {
                    goto illegal_op;
                }
                if (rn == 15) {
                    /* UNPREDICTABLE, unallocated hint or
                     * PLD/PLDW/PLI (literal)
                     */
                    return 0;
                }
                if (op1 & 1) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
                    return 0; /* PLD/PLDW/PLI or unallocated hint */
                }
                /* UNDEF space, or an UNPREDICTABLE */
                return 1;
            }
        }
        user = IS_USER(s);
        if (rn == 15) {
            addr = tcg_temp_new_i32();
            /* PC relative.  */
            /* s->pc has already been incremented by 4.  */
            imm = s->pc & 0xfffffffc;
            if (insn & (1 << 23))
                imm += insn & 0xfff;
            else
                imm -= insn & 0xfff;
            tcg_gen_movi_i32(addr, imm);
        } else {
            addr = load_reg(s, rn);
            if (insn & (1 << 23)) {
                /* Positive offset.  */
                imm = insn & 0xfff;
                tcg_gen_addi_i32(addr, addr, imm);
            } else {
                imm = insn & 0xff;
                switch ((insn >> 8) & 0xf) {
                case 0x0: /* Shifted Register.  */
                    shift = (insn >> 4) & 0xf;
                    if (shift > 3) {
                        tcg_temp_free_i32(addr);
                        goto illegal_op;
                    }
                    tmp = load_reg(s, rm);
                    if (shift)
                        tcg_gen_shli_i32(tmp, tmp, shift);
                    tcg_gen_add_i32(addr, addr, tmp);
                    tcg_temp_free_i32(tmp);
                    break;
                case 0xc: /* Negative offset.  */
                    tcg_gen_addi_i32(addr, addr, -imm);
                    break;
                case 0xe: /* User privilege.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    user = 1;
                    break;
                case 0x9: /* Post-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xb: /* Post-increment.  */
                    postinc = 1;
                    writeback = 1;
                    break;
                case 0xd: /* Pre-decrement.  */
                    imm = -imm;
                    /* Fall through.  */
                case 0xf: /* Pre-increment.  */
                    tcg_gen_addi_i32(addr, addr, imm);
                    writeback = 1;
                    break;
                default:
                    tcg_temp_free_i32(addr);
                    goto illegal_op;
                }
            }
        }
        if (insn & (1 << 20)) {
            /* Load.  */
            tmp = tcg_temp_new_i32();
            switch (op) {
            case 0:
                gen_aa32_ld8u(tmp, addr, user);
                break;
            case 4:
                gen_aa32_ld8s(tmp, addr, user);
                break;
            case 1:
                gen_aa32_ld16u(tmp, addr, user);
                break;
            case 5:
                gen_aa32_ld16s(tmp, addr, user);
                break;
            case 2:
                gen_aa32_ld32u(tmp, addr, user);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            if (rs == 15) {
                /* Word load into PC: interworking branch.  */
                gen_bx(s, tmp);
            } else {
                store_reg(s, rs, tmp);
            }
        } else {
            /* Store.  */
            tmp = load_reg(s, rs);
            switch (op) {
            case 0:
                gen_aa32_st8(tmp, addr, user);
                break;
            case 1:
                gen_aa32_st16(tmp, addr, user);
                break;
            case 2:
                gen_aa32_st32(tmp, addr, user);
                break;
            default:
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(addr);
                goto illegal_op;
            }
            tcg_temp_free_i32(tmp);
        }
        if (postinc)
            tcg_gen_addi_i32(addr, addr, imm);
        if (writeback) {
            store_reg(s, rn, addr);
        } else {
            tcg_temp_free_i32(addr);
        }
        }
        break;
    default:
        goto illegal_op;
    }
    return 0;
illegal_op:
    return 1;
}
9284
/* Decode and translate a single 16-bit Thumb instruction.
 *
 * Reads the halfword at s->pc (advancing s->pc by 2) and emits the
 * corresponding TCG ops.  32-bit Thumb-2 encodings (cases 14/15 with
 * the relevant prefix bits set) are handed off to disas_thumb2_insn().
 *
 * Conditional execution: if we are inside an IT block (s->condexec_mask
 * non-zero) the instruction is wrapped in a branch over its code, taken
 * when the IT condition fails.  Flag-setting is also suppressed inside
 * an IT block, which is why most ALU cases test s->condexec_mask to
 * choose between the plain op and the *_CC flag-computing variant.
 *
 * Undecodable encodings fall through to the undef/undef32 labels, which
 * raise EXCP_UDEF with the appropriate instruction length (2 or 4).
 */
static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i32 addr;

    if (s->condexec_mask) {
        cond = s->condexec_cond;
        if (cond != 0x0e) {     /* Skip conditional when condition is AL. */
          s->condlabel = gen_new_label();
          gen_test_cc(cond ^ 1, s->condlabel);
          s->condjmp = 1;
        }
    }

    insn = arm_lduw_code(env, s->pc, s->bswap_code);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:

        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            tmp = load_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                tmp2 = tcg_temp_new_i32();
                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                tmp2 = load_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
            } else {
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
            }
            tcg_temp_free_i32(tmp2);
            store_reg(s, rd, tmp);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) { /* mov */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, insn & 0xff);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        } else {
            tmp = load_reg(s, rd);
            tmp2 = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp2, insn & 0xff);
            switch (op) {
            case 1: /* cmp: flags only, no register writeback */
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp);
                tcg_temp_free_i32(tmp2);
                break;
            case 2: /* add */
                if (s->condexec_mask)
                    tcg_gen_add_i32(tmp, tmp, tmp2);
                else
                    gen_add_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 3: /* sub */
                if (s->condexec_mask)
                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                else
                    gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            }
        }
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative.  Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = tcg_temp_new_i32();
            tcg_gen_movi_i32(addr, val);
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            /* rd gets its high bit from insn bit 7: these encodings can
               access r8-r15, unlike the low-register ALU ops below.  */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                tcg_gen_add_i32(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                store_reg(s, rd, tmp);
                break;
            case 1: /* cmp */
                tmp = load_reg(s, rd);
                tmp2 = load_reg(s, rm);
                gen_sub_CC(tmp, tmp, tmp2);
                tcg_temp_free_i32(tmp2);
                tcg_temp_free_i32(tmp);
                break;
            case 2: /* mov/cpy */
                tmp = load_reg(s, rm);
                store_reg(s, rd, tmp);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    ARCH(5);
                    /* blx: link register gets return address with the
                       Thumb bit set.  */
                    val = (uint32_t)s->pc | 1;
                    tmp2 = tcg_temp_new_i32();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                /* already thumb, no need to check */
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;   /* val == 1 records that tmp2 holds the result */
        } else {
            val = 0;
        }

        if (op == 9) { /* neg */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED_I32(tmp);
        }

        tmp2 = load_reg(s, rm);
        /* In the cases below: rd = 16 marks "flags only, no writeback";
           when val is set the result lives in tmp2 (shift/rotate/mvn),
           otherwise in tmp.  The cleanup after the switch relies on
           these conventions.  */
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask) {
                gen_adc(tmp, tmp2);
            } else {
                gen_adc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask) {
                gen_sub_carry(tmp, tmp, tmp2);
            } else {
                gen_sbc_CC(tmp, tmp, tmp2);
            }
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                /* rotate amount is taken modulo 32 */
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_sub_CC(tmp, tmp, tmp2);   /* tmp was preset to 0 above */
            break;
        case 0xa: /* cmp */
            gen_sub_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_add_CC(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            /* result is in tmp2; route the writeback through the
               val/rm path below (tmp was never allocated for mvn).  */
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    tcg_temp_free_i32(tmp);
            } else {
                store_reg(s, rd, tmp);
                tcg_temp_free_i32(tmp2);
            }
        } else {
            /* comparison ops: both temps are dead */
            tcg_temp_free_i32(tmp);
            tcg_temp_free_i32(tmp2);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        tcg_temp_free_i32(tmp);

        if (op < 3) { /* store */
            tmp = load_reg(s, rd);
        } else {
            tmp = tcg_temp_new_i32();
        }

        switch (op) {
        case 0: /* str */
            gen_aa32_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_aa32_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_aa32_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            gen_aa32_ld8s(tmp, addr, IS_USER(s));
            break;
        case 4: /* ldr */
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            gen_aa32_ld16u(tmp, addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            gen_aa32_ld8u(tmp, addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            gen_aa32_ld16s(tmp, addr, IS_USER(s));
            break;
        }
        if (op >= 3) { /* load */
            store_reg(s, rd, tmp);
        } else {
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c;   /* imm5 bits 6..10, scaled by 4 */
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;   /* imm5, unscaled for bytes */
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld8u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st8(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;   /* imm5, scaled by 2 */
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld16u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st16(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = tcg_temp_new_i32();
            gen_aa32_ld32u(tmp, addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_aa32_st32(tmp, addr, IS_USER(s));
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            tmp = tcg_temp_new_i32();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend.  */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop; bit 8 adds lr (push) / pc (pop) to the list */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: pre-decrement SP by the total transfer size */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                        tcg_temp_free_i32(tmp);
                    }
                    /* advance to the next address.  */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED_I32(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: rewind addr to the new (lowered) SP */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900) {
                /* pop with pc in list: tmp still holds the loaded value */
                store_reg_from_load(env, s, 15, tmp);
            }
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            /* cbnz skips (branches to condlabel) when the reg is zero,
               cbz when it is non-zero; the fall-through path jumps.  */
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            tcg_temp_free_i32(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint.  */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then.  */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state.  */
            break;

        case 0xe: /* bkpt */
            ARCH(5);
            gen_exception_insn(s, 2, EXCP_BKPT);
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6:
            switch ((insn >> 5) & 7) {
            case 2:
                /* setend */
                ARCH(6);
                if (((insn >> 3) & 1) != s->bswap_code) {
                    /* Dynamic endianness switching not implemented. */
                    qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
                    goto illegal_op;
                }
                break;
            case 3:
                /* cps */
                ARCH(6);
                if (IS_USER(s)) {
                    /* cps is a no-op in user mode */
                    break;
                }
                if (IS_M(env)) {
                    /* M profile: route via the v7M special-register
                       helper (19 = FAULTMASK, 16 = PRIMASK).  */
                    tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                    /* FAULTMASK */
                    if (insn & 1) {
                        addr = tcg_const_i32(19);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    /* PRIMASK */
                    if (insn & 2) {
                        addr = tcg_const_i32(16);
                        gen_helper_v7m_msr(cpu_env, addr, tmp);
                        tcg_temp_free_i32(addr);
                    }
                    tcg_temp_free_i32(tmp);
                    gen_lookup_tb(s);
                } else {
                    if (insn & (1 << 4)) {
                        shift = CPSR_A | CPSR_I | CPSR_F;
                    } else {
                        shift = 0;
                    }
                    gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
                }
                break;
            default:
                goto undef;
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
    {
        /* load/store multiple */
        TCGv_i32 loaded_var;
        TCGV_UNUSED_I32(loaded_var);
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = tcg_temp_new_i32();
                    gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    if (i == rn) {
                        /* defer the base-register load so addr stays
                           valid for the remaining transfers */
                        loaded_var = tmp;
                    } else {
                        store_reg(s, i, tmp);
                    }
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_aa32_st32(tmp, addr, IS_USER(s));
                    tcg_temp_free_i32(tmp);
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        if ((insn & (1 << rn)) == 0) {
            /* base reg not in list: base register writeback */
            store_reg(s, rn, addr);
        } else {
            /* base reg in list: if load, complete it now */
            if (insn & (1 << 11)) {
                store_reg(s, rn, loaded_var);
            }
            tcg_temp_free_i32(addr);
        }
        break;
    }
    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;   /* sign-extend imm8 */
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;   /* sign-extend imm11 */
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    /* undefined 32-bit (Thumb-2) encoding: 4-byte instruction length */
    gen_exception_insn(s, 4, EXCP_UDEF);
    return;
illegal_op:
undef:
    gen_exception_insn(s, 2, EXCP_UDEF);
}
10007
2c0262af
FB
10008/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
10009 basic block 'tb'. If search_pc is TRUE, also generate PC
10010 information for each intermediate instruction. */
5639c3f2 10011static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 10012 TranslationBlock *tb,
5639c3f2 10013 bool search_pc)
2c0262af 10014{
ed2803da 10015 CPUState *cs = CPU(cpu);
5639c3f2 10016 CPUARMState *env = &cpu->env;
2c0262af 10017 DisasContext dc1, *dc = &dc1;
a1d1bb31 10018 CPUBreakpoint *bp;
2c0262af
FB
10019 uint16_t *gen_opc_end;
10020 int j, lj;
0fa85d43 10021 target_ulong pc_start;
b5ff1b31 10022 uint32_t next_page_start;
2e70f6ef
PB
10023 int num_insns;
10024 int max_insns;
3b46e624 10025
2c0262af 10026 /* generate intermediate code */
0fa85d43 10027 pc_start = tb->pc;
3b46e624 10028
2c0262af
FB
10029 dc->tb = tb;
10030
92414b31 10031 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10032
10033 dc->is_jmp = DISAS_NEXT;
10034 dc->pc = pc_start;
ed2803da 10035 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10036 dc->condjmp = 0;
7204ab88 10037 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 10038 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
10039 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10040 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 10041#if !defined(CONFIG_USER_ONLY)
61f74d6a 10042 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 10043#endif
5df8bac1 10044 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
10045 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10046 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
10047 cpu_F0s = tcg_temp_new_i32();
10048 cpu_F1s = tcg_temp_new_i32();
10049 cpu_F0d = tcg_temp_new_i64();
10050 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10051 cpu_V0 = cpu_F0d;
10052 cpu_V1 = cpu_F1d;
e677137d 10053 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10054 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10055 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10056 lj = -1;
2e70f6ef
PB
10057 num_insns = 0;
10058 max_insns = tb->cflags & CF_COUNT_MASK;
10059 if (max_insns == 0)
10060 max_insns = CF_COUNT_MASK;
10061
806f352d 10062 gen_tb_start();
e12ce78d 10063
3849902c
PM
10064 tcg_clear_temp_count();
10065
e12ce78d
PM
10066 /* A note on handling of the condexec (IT) bits:
10067 *
10068 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10069 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10070 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10071 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10072 * to do it at the end of the block. (For example if we don't do this
10073 * it's hard to identify whether we can safely skip writing condexec
10074 * at the end of the TB, which we definitely want to do for the case
10075 * where a TB doesn't do anything with the IT state at all.)
10076 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10077 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10078 * This is done both for leaving the TB at the end, and for leaving
10079 * it because of an exception we know will happen, which is done in
10080 * gen_exception_insn(). The latter is necessary because we need to
10081 * leave the TB with the PC/IT state just prior to execution of the
10082 * instruction which caused the exception.
10083 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10084 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10085 * This is handled in the same way as restoration of the
10086 * PC in these situations: we will be called again with search_pc=1
10087 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10088 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10089 * this to restore the condexec bits.
e12ce78d
PM
10090 *
10091 * Note that there are no instructions which can read the condexec
10092 * bits, and none which can write non-static values to them, so
0ecb72a5 10093 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10094 * middle of a TB.
10095 */
10096
9ee6e8bb
PB
10097 /* Reset the conditional execution bits immediately. This avoids
10098 complications trying to do it at the end of the block. */
98eac7ca 10099 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10100 {
39d5492a 10101 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10102 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10103 store_cpu_field(tmp, condexec_bits);
8f01245e 10104 }
2c0262af 10105 do {
fbb4a2e3
PB
10106#ifdef CONFIG_USER_ONLY
10107 /* Intercept jump to the magic kernel page. */
10108 if (dc->pc >= 0xffff0000) {
10109 /* We always get here via a jump, so know we are not in a
10110 conditional execution block. */
10111 gen_exception(EXCP_KERNEL_TRAP);
10112 dc->is_jmp = DISAS_UPDATE;
10113 break;
10114 }
10115#else
9ee6e8bb
PB
10116 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10117 /* We always get here via a jump, so know we are not in a
10118 conditional execution block. */
d9ba4830 10119 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10120 dc->is_jmp = DISAS_UPDATE;
10121 break;
9ee6e8bb
PB
10122 }
10123#endif
10124
72cf2d4f
BS
10125 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10126 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 10127 if (bp->pc == dc->pc) {
bc4a0de0 10128 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10129 /* Advance PC so that clearing the breakpoint will
10130 invalidate this TB. */
10131 dc->pc += 2;
10132 goto done_generating;
1fddef4b
FB
10133 }
10134 }
10135 }
2c0262af 10136 if (search_pc) {
92414b31 10137 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10138 if (lj < j) {
10139 lj++;
10140 while (lj < j)
ab1103de 10141 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10142 }
25983cad 10143 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10144 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 10145 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10146 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10147 }
e50e6a20 10148
2e70f6ef
PB
10149 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10150 gen_io_start();
10151
fdefe51c 10152 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10153 tcg_gen_debug_insn_start(dc->pc);
10154 }
10155
7204ab88 10156 if (dc->thumb) {
9ee6e8bb
PB
10157 disas_thumb_insn(env, dc);
10158 if (dc->condexec_mask) {
10159 dc->condexec_cond = (dc->condexec_cond & 0xe)
10160 | ((dc->condexec_mask >> 4) & 1);
10161 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10162 if (dc->condexec_mask == 0) {
10163 dc->condexec_cond = 0;
10164 }
10165 }
10166 } else {
10167 disas_arm_insn(env, dc);
10168 }
e50e6a20
FB
10169
10170 if (dc->condjmp && !dc->is_jmp) {
10171 gen_set_label(dc->condlabel);
10172 dc->condjmp = 0;
10173 }
3849902c
PM
10174
10175 if (tcg_check_temp_count()) {
10176 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
10177 }
10178
aaf2d97d 10179 /* Translation stops when a conditional branch is encountered.
e50e6a20 10180 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10181 * Also stop translation when a page boundary is reached. This
bf20dc07 10182 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10183 num_insns ++;
efd7f486 10184 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 10185 !cs->singlestep_enabled &&
1b530a6d 10186 !singlestep &&
2e70f6ef
PB
10187 dc->pc < next_page_start &&
10188 num_insns < max_insns);
10189
10190 if (tb->cflags & CF_LAST_IO) {
10191 if (dc->condjmp) {
10192 /* FIXME: This can theoretically happen with self-modifying
10193 code. */
10194 cpu_abort(env, "IO on conditional branch instruction");
10195 }
10196 gen_io_end();
10197 }
9ee6e8bb 10198
b5ff1b31 10199 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10200 instruction was a conditional branch or trap, and the PC has
10201 already been written. */
ed2803da 10202 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 10203 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10204 if (dc->condjmp) {
9ee6e8bb
PB
10205 gen_set_condexec(dc);
10206 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10207 gen_exception(EXCP_SWI);
9ee6e8bb 10208 } else {
d9ba4830 10209 gen_exception(EXCP_DEBUG);
9ee6e8bb 10210 }
e50e6a20
FB
10211 gen_set_label(dc->condlabel);
10212 }
10213 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 10214 gen_set_pc_im(dc->pc);
e50e6a20 10215 dc->condjmp = 0;
8aaca4c0 10216 }
9ee6e8bb
PB
10217 gen_set_condexec(dc);
10218 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10219 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10220 } else {
10221 /* FIXME: Single stepping a WFI insn will not halt
10222 the CPU. */
d9ba4830 10223 gen_exception(EXCP_DEBUG);
9ee6e8bb 10224 }
8aaca4c0 10225 } else {
9ee6e8bb
PB
10226 /* While branches must always occur at the end of an IT block,
10227 there are a few other things that can cause us to terminate
65626741 10228 the TB in the middle of an IT block:
9ee6e8bb
PB
10229 - Exception generating instructions (bkpt, swi, undefined).
10230 - Page boundaries.
10231 - Hardware watchpoints.
10232 Hardware breakpoints have already been handled and skip this code.
10233 */
10234 gen_set_condexec(dc);
8aaca4c0 10235 switch(dc->is_jmp) {
8aaca4c0 10236 case DISAS_NEXT:
6e256c93 10237 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10238 break;
10239 default:
10240 case DISAS_JUMP:
10241 case DISAS_UPDATE:
10242 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10243 tcg_gen_exit_tb(0);
8aaca4c0
FB
10244 break;
10245 case DISAS_TB_JUMP:
10246 /* nothing more to generate */
10247 break;
9ee6e8bb 10248 case DISAS_WFI:
1ce94f81 10249 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10250 break;
10251 case DISAS_SWI:
d9ba4830 10252 gen_exception(EXCP_SWI);
9ee6e8bb 10253 break;
8aaca4c0 10254 }
e50e6a20
FB
10255 if (dc->condjmp) {
10256 gen_set_label(dc->condlabel);
9ee6e8bb 10257 gen_set_condexec(dc);
6e256c93 10258 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10259 dc->condjmp = 0;
10260 }
2c0262af 10261 }
2e70f6ef 10262
9ee6e8bb 10263done_generating:
806f352d 10264 gen_tb_end(tb, num_insns);
efd7f486 10265 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10266
10267#ifdef DEBUG_DISAS
8fec2b8c 10268 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10269 qemu_log("----------------\n");
10270 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10271 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10272 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10273 qemu_log("\n");
2c0262af
FB
10274 }
10275#endif
b5ff1b31 10276 if (search_pc) {
92414b31 10277 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10278 lj++;
10279 while (lj <= j)
ab1103de 10280 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10281 } else {
2c0262af 10282 tb->size = dc->pc - pc_start;
2e70f6ef 10283 tb->icount = num_insns;
b5ff1b31 10284 }
2c0262af
FB
10285}
10286
0ecb72a5 10287void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10288{
5639c3f2 10289 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10290}
10291
0ecb72a5 10292void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10293{
5639c3f2 10294 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10295}
10296
/* Human-readable names for the ARM CPU modes, indexed by the low four
 * bits of the PSR mode field; reserved encodings read as "???".
 * The pointers themselves are const-qualified too: the table is
 * read-only data and should live entirely in .rodata.  */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 10301
878096ee
AF
10302void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10303 int flags)
2c0262af 10304{
878096ee
AF
10305 ARMCPU *cpu = ARM_CPU(cs);
10306 CPUARMState *env = &cpu->env;
2c0262af 10307 int i;
b5ff1b31 10308 uint32_t psr;
2c0262af
FB
10309
10310 for(i=0;i<16;i++) {
7fe48483 10311 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10312 if ((i % 4) == 3)
7fe48483 10313 cpu_fprintf(f, "\n");
2c0262af 10314 else
7fe48483 10315 cpu_fprintf(f, " ");
2c0262af 10316 }
b5ff1b31 10317 psr = cpsr_read(env);
687fa640
TS
10318 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10319 psr,
b5ff1b31
FB
10320 psr & (1 << 31) ? 'N' : '-',
10321 psr & (1 << 30) ? 'Z' : '-',
10322 psr & (1 << 29) ? 'C' : '-',
10323 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10324 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10325 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10326
f2617cfc
PM
10327 if (flags & CPU_DUMP_FPU) {
10328 int numvfpregs = 0;
10329 if (arm_feature(env, ARM_FEATURE_VFP)) {
10330 numvfpregs += 16;
10331 }
10332 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10333 numvfpregs += 16;
10334 }
10335 for (i = 0; i < numvfpregs; i++) {
10336 uint64_t v = float64_val(env->vfp.regs[i]);
10337 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10338 i * 2, (uint32_t)v,
10339 i * 2 + 1, (uint32_t)(v >> 32),
10340 i, v);
10341 }
10342 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10343 }
2c0262af 10344}
a6b025d3 10345
0ecb72a5 10346void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10347{
25983cad 10348 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10349 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10350}