/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
    int bswap_code;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    int vfp_enabled;
    int vec_len;
    int vec_stride;
} DisasContext;

static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

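/* Note on PC reads: the architectural value of r15 is the address of the
   current instruction plus 8 in ARM state and plus 4 in Thumb state.  By the
   time operands are generated, s->pc has already been advanced past the
   current instruction, so load_reg_var() below only needs to add one more
   instruction's worth (4 or 2 bytes) to produce that value.  */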
/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

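/* Dual signed 16x16->32 multiply: on return, a holds the product of the two
   low halfwords and b holds the product of the two high halfwords, both
   treated as signed.  The inputs are clobbered.  */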
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

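/* Worked example for the byteswap helpers above: with var = 0xaabbccdd,
   gen_rev16() yields 0xbbaaddcc (bytes swapped within each halfword), while
   gen_revsh() yields sign_extend(0xddcc) = 0xffffddcc (low halfword
   byteswapped, then sign extended).  */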
/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

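/* gen_sbfx() above sign extends the extracted field without a variable-width
   shift: after masking to 'width' bits, xor-ing with the sign bit and then
   subtracting it maps values with the sign bit set to their negative
   two's-complement equivalents, e.g. for width 8, (0x80 ^ 0x80) - 0x80 = -128
   while 0x7f stays 0x7f.  */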
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

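/* The helper below performs two independent 16-bit additions with a single
   32-bit add: clearing bit 15 of both addends stops any carry from
   propagating out of the low halfword, and the saved xor term restores the
   correct bit 15 of each halfword result.  */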
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

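/* In gen_add_CC() above, signed overflow is derived as
   V = (result ^ t0) & ~(t0 ^ t1): it is set when both operands have the same
   sign and the result's sign differs.  The subtraction variant below
   (gen_sub_CC) uses (result ^ t0) & (t0 ^ t1) instead, since subtraction can
   only overflow when the operands have different signs.  */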
/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

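/* gen_adc_CC() above has two code paths: when the TCG backend provides
   add2_i32, the carry chain is built directly from two double-word adds;
   otherwise the operands are widened to 64 bits and the carry is recovered
   from the high half of the 64-bit sum.  */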
/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

#define GEN_SHIFT(name) \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
{ \
    TCGv_i32 tmp1, tmp2, tmp3; \
    tmp1 = tcg_temp_new_i32(); \
    tcg_gen_andi_i32(tmp1, t1, 0xff); \
    tmp2 = tcg_const_i32(0); \
    tmp3 = tcg_const_i32(0x1f); \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
    tcg_temp_free_i32(tmp3); \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
    tcg_gen_##name##_i32(dest, tmp2, tmp1); \
    tcg_temp_free_i32(tmp2); \
    tcg_temp_free_i32(tmp1); \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

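/* The gen_shl/gen_shr helpers generated above implement ARM
   register-specified shifts: only the bottom byte of the shift register is
   used, and any amount greater than 31 produces zero (selected via movcond).
   gen_sar() below instead clamps the amount to 31, so an arithmetic right
   shift by 32 or more yields a copy of the sign bit in every position.  */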
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

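/* In the immediate-shift encodings handled below, a shift count of zero is
   special: LSR #0 encodes LSR #32, ASR #0 encodes ASR #32, and ROR #0 encodes
   RRX (rotate right by one through the carry flag).  LSL #0 really is a plain
   copy with the carry flag left untouched.  */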
/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

static void gen_test_cc(int cc, int label)
{
    TCGv_i32 tmp;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 1: /* ne: !Z */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        break;
    case 2: /* cs: C */
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
        break;
    case 3: /* cc: !C */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        break;
    case 4: /* mi: N */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
        break;
    case 5: /* pl: !N */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
        break;
    case 6: /* vs: V */
        tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
        break;
    case 7: /* vc: !V */
        tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
        tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
        tmp = tcg_temp_new_i32();
        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        tcg_temp_free_i32(tmp);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
}

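/* Indexed by the 4-bit data processing opcode: a 1 marks the "logical" ops
   (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN) whose S-form sets N and Z from the
   result and C from the shifter, while the arithmetic ops compute a full
   NZCV result instead.  */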
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

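/* gen_bx_im() and gen_bx() above implement interworking branches: bit 0 of
   the target address selects the Thumb execution state and is cleared before
   being written to r15.  */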
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

static inline TCGv_i32 gen_ld8s(TCGv_i32 addr, int index)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv_i32 gen_ld8u(TCGv_i32 addr, int index)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i32 gen_ld16s(TCGv_i32 addr, int index)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv_i32 gen_ld16u(TCGv_i32 addr, int index)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i32 gen_ld32(TCGv_i32 addr, int index)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv_i32 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st16(TCGv_i32 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    tcg_temp_free_i32(val);
}
static inline void gen_st32(TCGv_i32 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    tcg_temp_free_i32(val);
}

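/* The gen_ld8s()..gen_ld32() and gen_st8()..gen_st32() helpers above are thin
   wrappers around the tcg_gen_qemu_* ops: the loads allocate and return a new
   temporary, the stores consume and free 'val', and 'index' is the memory
   (MMU) index, normally IS_USER(s).  */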
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

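/* The two helpers above add the addressing-mode offset of an ARM load/store
   to the base address in 'var': gen_add_data_offset() handles the word/byte
   forms (bit 25 selects a shifted register rather than a 12-bit immediate),
   while gen_add_datah_offset() handles the halfword/doubleword forms with
   their split 4+4-bit immediate.  In both, bit 23 selects whether the offset
   is added or subtracted.  */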
static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    TCGv_ptr fpst = get_fpstatus_ptr(0); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
    } \
    tcg_temp_free_ptr(fpst); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

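/* vfp_reg_offset() and neon_reg_offset() above rely on the VFP/NEON register
   file aliasing: single-precision register S<2n> is the low word and S<2n+1>
   the high word of double register D<n>, and each 32-bit NEON "pass" of a
   D register is addressed the same way.  */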
static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

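/* gen_iwmmxt_address() above decodes the coprocessor load/store addressing
   mode: bit 24 selects pre-indexed addressing, bit 21 requests base
   writeback (making the non-pre-indexed form post-indexed), bit 23 chooses
   whether the offset is added or subtracted, and the 8-bit immediate is
   scaled by 4 when bit 8 is set.  It returns nonzero when the encoding is
   not a valid addressing form.  */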
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {        /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                           /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {         /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {    /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                   /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {    /* WLDRH */
                        tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
                    } else {                   /* WLDRB */
                        tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {         /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {    /* WSTRD */
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else {                   /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {    /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
                    } else {                   /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                    /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:    /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:    /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:    /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:    /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:    /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:    /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:    /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:    /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:    /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:    /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:    /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:    /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13:    /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1877 return 1;
da6b5335 1878 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1879 tmp2 = tcg_temp_new_i32();
da6b5335 1880 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1881 switch ((insn >> 22) & 3) {
1882 case 0:
1883 for (i = 0; i < 7; i ++) {
da6b5335
FN
1884 tcg_gen_shli_i32(tmp2, tmp2, 4);
1885 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1886 }
1887 break;
1888 case 1:
1889 for (i = 0; i < 3; i ++) {
da6b5335
FN
1890 tcg_gen_shli_i32(tmp2, tmp2, 8);
1891 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1892 }
1893 break;
1894 case 2:
da6b5335
FN
1895 tcg_gen_shli_i32(tmp2, tmp2, 16);
1896 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1897 break;
18c9b560 1898 }
da6b5335 1899 gen_set_nzcv(tmp);
7d1b0095
PM
1900 tcg_temp_free_i32(tmp2);
1901 tcg_temp_free_i32(tmp);
18c9b560
AZ
1902 break;
1903 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1904 wrd = (insn >> 12) & 0xf;
1905 rd0 = (insn >> 16) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
1907 switch ((insn >> 22) & 3) {
1908 case 0:
e677137d 1909 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1910 break;
1911 case 1:
e677137d 1912 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1913 break;
1914 case 2:
e677137d 1915 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1916 break;
1917 case 3:
1918 return 1;
1919 }
1920 gen_op_iwmmxt_movq_wRn_M0(wrd);
1921 gen_op_iwmmxt_set_mup();
1922 break;
1923 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1924 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1925 return 1;
da6b5335 1926 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1927 tmp2 = tcg_temp_new_i32();
da6b5335 1928 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1929 switch ((insn >> 22) & 3) {
1930 case 0:
1931 for (i = 0; i < 7; i ++) {
da6b5335
FN
1932 tcg_gen_shli_i32(tmp2, tmp2, 4);
1933 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1934 }
1935 break;
1936 case 1:
1937 for (i = 0; i < 3; i ++) {
da6b5335
FN
1938 tcg_gen_shli_i32(tmp2, tmp2, 8);
1939 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1940 }
1941 break;
1942 case 2:
da6b5335
FN
1943 tcg_gen_shli_i32(tmp2, tmp2, 16);
1944 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1945 break;
18c9b560 1946 }
da6b5335 1947 gen_set_nzcv(tmp);
7d1b0095
PM
1948 tcg_temp_free_i32(tmp2);
1949 tcg_temp_free_i32(tmp);
18c9b560
AZ
1950 break;
1951 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1952 rd = (insn >> 12) & 0xf;
1953 rd0 = (insn >> 16) & 0xf;
da6b5335 1954 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1955 return 1;
1956 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1957 tmp = tcg_temp_new_i32();
18c9b560
AZ
1958 switch ((insn >> 22) & 3) {
1959 case 0:
da6b5335 1960 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1961 break;
1962 case 1:
da6b5335 1963 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1964 break;
1965 case 2:
da6b5335 1966 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1967 break;
18c9b560 1968 }
da6b5335 1969 store_reg(s, rd, tmp);
18c9b560
AZ
1970 break;
1971 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1972 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1973 wrd = (insn >> 12) & 0xf;
1974 rd0 = (insn >> 16) & 0xf;
1975 rd1 = (insn >> 0) & 0xf;
1976 gen_op_iwmmxt_movq_M0_wRn(rd0);
1977 switch ((insn >> 22) & 3) {
1978 case 0:
1979 if (insn & (1 << 21))
1980 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1981 else
1982 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1983 break;
1984 case 1:
1985 if (insn & (1 << 21))
1986 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1987 else
1988 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1989 break;
1990 case 2:
1991 if (insn & (1 << 21))
1992 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1993 else
1994 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1995 break;
1996 case 3:
1997 return 1;
1998 }
1999 gen_op_iwmmxt_movq_wRn_M0(wrd);
2000 gen_op_iwmmxt_set_mup();
2001 gen_op_iwmmxt_set_cup();
2002 break;
2003 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2004 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2005 wrd = (insn >> 12) & 0xf;
2006 rd0 = (insn >> 16) & 0xf;
2007 gen_op_iwmmxt_movq_M0_wRn(rd0);
2008 switch ((insn >> 22) & 3) {
2009 case 0:
2010 if (insn & (1 << 21))
2011 gen_op_iwmmxt_unpacklsb_M0();
2012 else
2013 gen_op_iwmmxt_unpacklub_M0();
2014 break;
2015 case 1:
2016 if (insn & (1 << 21))
2017 gen_op_iwmmxt_unpacklsw_M0();
2018 else
2019 gen_op_iwmmxt_unpackluw_M0();
2020 break;
2021 case 2:
2022 if (insn & (1 << 21))
2023 gen_op_iwmmxt_unpacklsl_M0();
2024 else
2025 gen_op_iwmmxt_unpacklul_M0();
2026 break;
2027 case 3:
2028 return 1;
2029 }
2030 gen_op_iwmmxt_movq_wRn_M0(wrd);
2031 gen_op_iwmmxt_set_mup();
2032 gen_op_iwmmxt_set_cup();
2033 break;
2034 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2035 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2036 wrd = (insn >> 12) & 0xf;
2037 rd0 = (insn >> 16) & 0xf;
2038 gen_op_iwmmxt_movq_M0_wRn(rd0);
2039 switch ((insn >> 22) & 3) {
2040 case 0:
2041 if (insn & (1 << 21))
2042 gen_op_iwmmxt_unpackhsb_M0();
2043 else
2044 gen_op_iwmmxt_unpackhub_M0();
2045 break;
2046 case 1:
2047 if (insn & (1 << 21))
2048 gen_op_iwmmxt_unpackhsw_M0();
2049 else
2050 gen_op_iwmmxt_unpackhuw_M0();
2051 break;
2052 case 2:
2053 if (insn & (1 << 21))
2054 gen_op_iwmmxt_unpackhsl_M0();
2055 else
2056 gen_op_iwmmxt_unpackhul_M0();
2057 break;
2058 case 3:
2059 return 1;
2060 }
2061 gen_op_iwmmxt_movq_wRn_M0(wrd);
2062 gen_op_iwmmxt_set_mup();
2063 gen_op_iwmmxt_set_cup();
2064 break;
2065 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2066 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2067 if (((insn >> 22) & 3) == 0)
2068 return 1;
18c9b560
AZ
2069 wrd = (insn >> 12) & 0xf;
2070 rd0 = (insn >> 16) & 0xf;
2071 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2072 tmp = tcg_temp_new_i32();
da6b5335 2073 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2074 tcg_temp_free_i32(tmp);
18c9b560 2075 return 1;
da6b5335 2076 }
18c9b560 2077 switch ((insn >> 22) & 3) {
18c9b560 2078 case 1:
477955bd 2079 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2080 break;
2081 case 2:
477955bd 2082 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2083 break;
2084 case 3:
477955bd 2085 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2086 break;
2087 }
7d1b0095 2088 tcg_temp_free_i32(tmp);
18c9b560
AZ
2089 gen_op_iwmmxt_movq_wRn_M0(wrd);
2090 gen_op_iwmmxt_set_mup();
2091 gen_op_iwmmxt_set_cup();
2092 break;
2093 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2094 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2095 if (((insn >> 22) & 3) == 0)
2096 return 1;
18c9b560
AZ
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf;
2099 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2100 tmp = tcg_temp_new_i32();
da6b5335 2101 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2102 tcg_temp_free_i32(tmp);
18c9b560 2103 return 1;
da6b5335 2104 }
18c9b560 2105 switch ((insn >> 22) & 3) {
18c9b560 2106 case 1:
477955bd 2107 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2108 break;
2109 case 2:
477955bd 2110 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2111 break;
2112 case 3:
477955bd 2113 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2114 break;
2115 }
7d1b0095 2116 tcg_temp_free_i32(tmp);
18c9b560
AZ
2117 gen_op_iwmmxt_movq_wRn_M0(wrd);
2118 gen_op_iwmmxt_set_mup();
2119 gen_op_iwmmxt_set_cup();
2120 break;
2121 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2122 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2123 if (((insn >> 22) & 3) == 0)
2124 return 1;
18c9b560
AZ
2125 wrd = (insn >> 12) & 0xf;
2126 rd0 = (insn >> 16) & 0xf;
2127 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2128 tmp = tcg_temp_new_i32();
da6b5335 2129 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2130 tcg_temp_free_i32(tmp);
18c9b560 2131 return 1;
da6b5335 2132 }
18c9b560 2133 switch ((insn >> 22) & 3) {
18c9b560 2134 case 1:
477955bd 2135 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2136 break;
2137 case 2:
477955bd 2138 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2139 break;
2140 case 3:
477955bd 2141 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2142 break;
2143 }
7d1b0095 2144 tcg_temp_free_i32(tmp);
18c9b560
AZ
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 gen_op_iwmmxt_set_cup();
2148 break;
2149 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2150 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2151 if (((insn >> 22) & 3) == 0)
2152 return 1;
18c9b560
AZ
2153 wrd = (insn >> 12) & 0xf;
2154 rd0 = (insn >> 16) & 0xf;
2155 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2156 tmp = tcg_temp_new_i32();
18c9b560 2157 switch ((insn >> 22) & 3) {
18c9b560 2158 case 1:
da6b5335 2159 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2160 tcg_temp_free_i32(tmp);
18c9b560 2161 return 1;
da6b5335 2162 }
477955bd 2163 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2164 break;
2165 case 2:
da6b5335 2166 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2167 tcg_temp_free_i32(tmp);
18c9b560 2168 return 1;
da6b5335 2169 }
477955bd 2170 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2171 break;
2172 case 3:
da6b5335 2173 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2174 tcg_temp_free_i32(tmp);
18c9b560 2175 return 1;
da6b5335 2176 }
477955bd 2177 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2178 break;
2179 }
7d1b0095 2180 tcg_temp_free_i32(tmp);
18c9b560
AZ
2181 gen_op_iwmmxt_movq_wRn_M0(wrd);
2182 gen_op_iwmmxt_set_mup();
2183 gen_op_iwmmxt_set_cup();
2184 break;
2185 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2186 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2187 wrd = (insn >> 12) & 0xf;
2188 rd0 = (insn >> 16) & 0xf;
2189 rd1 = (insn >> 0) & 0xf;
2190 gen_op_iwmmxt_movq_M0_wRn(rd0);
2191 switch ((insn >> 22) & 3) {
2192 case 0:
2193 if (insn & (1 << 21))
2194 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2195 else
2196 gen_op_iwmmxt_minub_M0_wRn(rd1);
2197 break;
2198 case 1:
2199 if (insn & (1 << 21))
2200 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2201 else
2202 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2203 break;
2204 case 2:
2205 if (insn & (1 << 21))
2206 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2207 else
2208 gen_op_iwmmxt_minul_M0_wRn(rd1);
2209 break;
2210 case 3:
2211 return 1;
2212 }
2213 gen_op_iwmmxt_movq_wRn_M0(wrd);
2214 gen_op_iwmmxt_set_mup();
2215 break;
2216 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2217 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2218 wrd = (insn >> 12) & 0xf;
2219 rd0 = (insn >> 16) & 0xf;
2220 rd1 = (insn >> 0) & 0xf;
2221 gen_op_iwmmxt_movq_M0_wRn(rd0);
2222 switch ((insn >> 22) & 3) {
2223 case 0:
2224 if (insn & (1 << 21))
2225 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2226 else
2227 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2228 break;
2229 case 1:
2230 if (insn & (1 << 21))
2231 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2232 else
2233 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2234 break;
2235 case 2:
2236 if (insn & (1 << 21))
2237 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2238 else
2239 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2240 break;
2241 case 3:
2242 return 1;
2243 }
2244 gen_op_iwmmxt_movq_wRn_M0(wrd);
2245 gen_op_iwmmxt_set_mup();
2246 break;
2247 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2248 case 0x402: case 0x502: case 0x602: case 0x702:
2249 wrd = (insn >> 12) & 0xf;
2250 rd0 = (insn >> 16) & 0xf;
2251 rd1 = (insn >> 0) & 0xf;
2252 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2253 tmp = tcg_const_i32((insn >> 20) & 3);
2254 iwmmxt_load_reg(cpu_V1, rd1);
2255 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2256 tcg_temp_free_i32(tmp);
18c9b560
AZ
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 break;
2260 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2261 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2262 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2263 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2264 wrd = (insn >> 12) & 0xf;
2265 rd0 = (insn >> 16) & 0xf;
2266 rd1 = (insn >> 0) & 0xf;
2267 gen_op_iwmmxt_movq_M0_wRn(rd0);
2268 switch ((insn >> 20) & 0xf) {
2269 case 0x0:
2270 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2271 break;
2272 case 0x1:
2273 gen_op_iwmmxt_subub_M0_wRn(rd1);
2274 break;
2275 case 0x3:
2276 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2277 break;
2278 case 0x4:
2279 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2280 break;
2281 case 0x5:
2282 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2283 break;
2284 case 0x7:
2285 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2286 break;
2287 case 0x8:
2288 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2289 break;
2290 case 0x9:
2291 gen_op_iwmmxt_subul_M0_wRn(rd1);
2292 break;
2293 case 0xb:
2294 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2295 break;
2296 default:
2297 return 1;
2298 }
2299 gen_op_iwmmxt_movq_wRn_M0(wrd);
2300 gen_op_iwmmxt_set_mup();
2301 gen_op_iwmmxt_set_cup();
2302 break;
2303 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2304 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2305 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2306 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2307 wrd = (insn >> 12) & 0xf;
2308 rd0 = (insn >> 16) & 0xf;
2309 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2310 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2311 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2312 tcg_temp_free_i32(tmp);
18c9b560
AZ
2313 gen_op_iwmmxt_movq_wRn_M0(wrd);
2314 gen_op_iwmmxt_set_mup();
2315 gen_op_iwmmxt_set_cup();
2316 break;
2317 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2318 case 0x418: case 0x518: case 0x618: case 0x718:
2319 case 0x818: case 0x918: case 0xa18: case 0xb18:
2320 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2321 wrd = (insn >> 12) & 0xf;
2322 rd0 = (insn >> 16) & 0xf;
2323 rd1 = (insn >> 0) & 0xf;
2324 gen_op_iwmmxt_movq_M0_wRn(rd0);
2325 switch ((insn >> 20) & 0xf) {
2326 case 0x0:
2327 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2328 break;
2329 case 0x1:
2330 gen_op_iwmmxt_addub_M0_wRn(rd1);
2331 break;
2332 case 0x3:
2333 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2334 break;
2335 case 0x4:
2336 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2337 break;
2338 case 0x5:
2339 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2340 break;
2341 case 0x7:
2342 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2343 break;
2344 case 0x8:
2345 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2346 break;
2347 case 0x9:
2348 gen_op_iwmmxt_addul_M0_wRn(rd1);
2349 break;
2350 case 0xb:
2351 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2352 break;
2353 default:
2354 return 1;
2355 }
2356 gen_op_iwmmxt_movq_wRn_M0(wrd);
2357 gen_op_iwmmxt_set_mup();
2358 gen_op_iwmmxt_set_cup();
2359 break;
2360 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2361 case 0x408: case 0x508: case 0x608: case 0x708:
2362 case 0x808: case 0x908: case 0xa08: case 0xb08:
2363 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2364 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2365 return 1;
18c9b560
AZ
2366 wrd = (insn >> 12) & 0xf;
2367 rd0 = (insn >> 16) & 0xf;
2368 rd1 = (insn >> 0) & 0xf;
2369 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2370 switch ((insn >> 22) & 3) {
18c9b560
AZ
2371 case 1:
2372 if (insn & (1 << 21))
2373 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2374 else
2375 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2376 break;
2377 case 2:
2378 if (insn & (1 << 21))
2379 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2380 else
2381 gen_op_iwmmxt_packul_M0_wRn(rd1);
2382 break;
2383 case 3:
2384 if (insn & (1 << 21))
2385 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2386 else
2387 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2388 break;
2389 }
2390 gen_op_iwmmxt_movq_wRn_M0(wrd);
2391 gen_op_iwmmxt_set_mup();
2392 gen_op_iwmmxt_set_cup();
2393 break;
2394 case 0x201: case 0x203: case 0x205: case 0x207:
2395 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2396 case 0x211: case 0x213: case 0x215: case 0x217:
2397 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2398 wrd = (insn >> 5) & 0xf;
2399 rd0 = (insn >> 12) & 0xf;
2400 rd1 = (insn >> 0) & 0xf;
2401 if (rd0 == 0xf || rd1 == 0xf)
2402 return 1;
2403 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2404 tmp = load_reg(s, rd0);
2405 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2406 switch ((insn >> 16) & 0xf) {
2407 case 0x0: /* TMIA */
da6b5335 2408 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2409 break;
2410 case 0x8: /* TMIAPH */
da6b5335 2411 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2412 break;
2413 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2414 if (insn & (1 << 16))
da6b5335 2415 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2416 if (insn & (1 << 17))
da6b5335
FN
2417 tcg_gen_shri_i32(tmp2, tmp2, 16);
2418 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2419 break;
2420 default:
7d1b0095
PM
2421 tcg_temp_free_i32(tmp2);
2422 tcg_temp_free_i32(tmp);
18c9b560
AZ
2423 return 1;
2424 }
7d1b0095
PM
2425 tcg_temp_free_i32(tmp2);
2426 tcg_temp_free_i32(tmp);
18c9b560
AZ
2427 gen_op_iwmmxt_movq_wRn_M0(wrd);
2428 gen_op_iwmmxt_set_mup();
2429 break;
2430 default:
2431 return 1;
2432 }
2433
2434 return 0;
2435}
2436
a1c7273b 2437/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2438 (i.e. an undefined instruction). */
0ecb72a5 2439static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2440{
2441 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2442 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2443
2444 if ((insn & 0x0ff00f10) == 0x0e200010) {
2445 /* Multiply with Internal Accumulate Format */
2446 rd0 = (insn >> 12) & 0xf;
2447 rd1 = insn & 0xf;
2448 acc = (insn >> 5) & 7;
2449
2450 if (acc != 0)
2451 return 1;
2452
3a554c0f
FN
2453 tmp = load_reg(s, rd0);
2454 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2455 switch ((insn >> 16) & 0xf) {
2456 case 0x0: /* MIA */
3a554c0f 2457 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2458 break;
2459 case 0x8: /* MIAPH */
3a554c0f 2460 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2461 break;
2462 case 0xc: /* MIABB */
2463 case 0xd: /* MIABT */
2464 case 0xe: /* MIATB */
2465 case 0xf: /* MIATT */
18c9b560 2466 if (insn & (1 << 16))
3a554c0f 2467 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2468 if (insn & (1 << 17))
3a554c0f
FN
2469 tcg_gen_shri_i32(tmp2, tmp2, 16);
2470 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2471 break;
2472 default:
2473 return 1;
2474 }
7d1b0095
PM
2475 tcg_temp_free_i32(tmp2);
2476 tcg_temp_free_i32(tmp);
18c9b560
AZ
2477
2478 gen_op_iwmmxt_movq_wRn_M0(acc);
2479 return 0;
2480 }
2481
2482 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2483 /* Internal Accumulator Access Format */
2484 rdhi = (insn >> 16) & 0xf;
2485 rdlo = (insn >> 12) & 0xf;
2486 acc = insn & 7;
2487
2488 if (acc != 0)
2489 return 1;
2490
2491 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2492 iwmmxt_load_reg(cpu_V0, acc);
2493 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2494 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2495 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2496 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2497 } else { /* MAR */
3a554c0f
FN
2498 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2499 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2500 }
2501 return 0;
2502 }
2503
2504 return 1;
2505}
2506
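/* A worked example for the MRA path above, assuming the usual 40-bit XScale
 * accumulator: MRA copies acc0[31:0] into RdLo and acc0[39:32] into RdHi,
 * which is why RdHi is masked with (1 << (40 - 32)) - 1 == 0xff.
 */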
9ee6e8bb
PB
2507#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2508#define VFP_SREG(insn, bigbit, smallbit) \
2509 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2510#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2511 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2512 reg = (((insn) >> (bigbit)) & 0x0f) \
2513 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2514 } else { \
2515 if (insn & (1 << (smallbit))) \
2516 return 1; \
2517 reg = ((insn) >> (bigbit)) & 0x0f; \
2518 }} while (0)
2519
2520#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2521#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2522#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2523#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2524#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2525#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2526
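/* A worked example of the field extraction above (a sketch, not tied to any
 * particular instruction): VFP_DREG_D computes reg = insn[15:12] | (insn[22] << 4),
 * so bits [15:12] = 0x5 with bit 22 set selects D21 when VFP3 is present;
 * without VFP3 a set bit 22 makes the macro bail out (UNDEF).  VFP_SREG_D
 * computes (insn[15:12] << 1) | insn[22], so the same fields select S11.
 */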
4373f3ce 2527/* Move between integer and VFP cores. */
39d5492a 2528static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2529{
39d5492a 2530 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2531 tcg_gen_mov_i32(tmp, cpu_F0s);
2532 return tmp;
2533}
2534
39d5492a 2535static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2536{
2537 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2538 tcg_temp_free_i32(tmp);
4373f3ce
PB
2539}
2540
39d5492a 2541static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2542{
39d5492a 2543 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2544 if (shift)
2545 tcg_gen_shri_i32(var, var, shift);
86831435 2546 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2547 tcg_gen_shli_i32(tmp, var, 8);
2548 tcg_gen_or_i32(var, var, tmp);
2549 tcg_gen_shli_i32(tmp, var, 16);
2550 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2551 tcg_temp_free_i32(tmp);
ad69471c
PB
2552}
2553
39d5492a 2554static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2555{
39d5492a 2556 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2557 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2558 tcg_gen_shli_i32(tmp, var, 16);
2559 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2560 tcg_temp_free_i32(tmp);
ad69471c
PB
2561}
2562
39d5492a 2563static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2564{
39d5492a 2565 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2566 tcg_gen_andi_i32(var, var, 0xffff0000);
2567 tcg_gen_shri_i32(tmp, var, 16);
2568 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2569 tcg_temp_free_i32(tmp);
ad69471c
PB
2570}
2571
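/* Example behaviour of the dup helpers above: with var = 0x1234abcd,
 * gen_neon_dup_u8(var, 0) leaves 0xcdcdcdcd, gen_neon_dup_low16() leaves
 * 0xabcdabcd, and gen_neon_dup_high16() leaves 0x12341234.
 */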
39d5492a 2572static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2573{
2574 /* Load a single Neon element and replicate into a 32-bit TCG reg */
58ab8e96 2575 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2576 switch (size) {
2577 case 0:
58ab8e96 2578 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2579 gen_neon_dup_u8(tmp, 0);
2580 break;
2581 case 1:
58ab8e96 2582 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2583 gen_neon_dup_low16(tmp);
2584 break;
2585 case 2:
58ab8e96 2586 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2587 break;
2588 default: /* Avoid compiler warnings. */
2589 abort();
2590 }
2591 return tmp;
2592}
2593
a1c7273b 2594/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2595 (i.e. an undefined instruction). */
0ecb72a5 2596static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2597{
2598 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2599 int dp, veclen;
39d5492a
PM
2600 TCGv_i32 addr;
2601 TCGv_i32 tmp;
2602 TCGv_i32 tmp2;
b7bcbe95 2603
40f137e1
PB
2604 if (!arm_feature(env, ARM_FEATURE_VFP))
2605 return 1;
2606
5df8bac1 2607 if (!s->vfp_enabled) {
9ee6e8bb 2608 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2609 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2610 return 1;
2611 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2612 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2613 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2614 return 1;
2615 }
b7bcbe95
FB
2616 dp = ((insn & 0xf00) == 0xb00);
2617 switch ((insn >> 24) & 0xf) {
2618 case 0xe:
2619 if (insn & (1 << 4)) {
2620 /* single register transfer */
b7bcbe95
FB
2621 rd = (insn >> 12) & 0xf;
2622 if (dp) {
9ee6e8bb
PB
2623 int size;
2624 int pass;
2625
2626 VFP_DREG_N(rn, insn);
2627 if (insn & 0xf)
b7bcbe95 2628 return 1;
9ee6e8bb
PB
2629 if (insn & 0x00c00060
2630 && !arm_feature(env, ARM_FEATURE_NEON))
2631 return 1;
2632
2633 pass = (insn >> 21) & 1;
2634 if (insn & (1 << 22)) {
2635 size = 0;
2636 offset = ((insn >> 5) & 3) * 8;
2637 } else if (insn & (1 << 5)) {
2638 size = 1;
2639 offset = (insn & (1 << 6)) ? 16 : 0;
2640 } else {
2641 size = 2;
2642 offset = 0;
2643 }
18c9b560 2644 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2645 /* vfp->arm */
ad69471c 2646 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2647 switch (size) {
2648 case 0:
9ee6e8bb 2649 if (offset)
ad69471c 2650 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2651 if (insn & (1 << 23))
ad69471c 2652 gen_uxtb(tmp);
9ee6e8bb 2653 else
ad69471c 2654 gen_sxtb(tmp);
9ee6e8bb
PB
2655 break;
2656 case 1:
9ee6e8bb
PB
2657 if (insn & (1 << 23)) {
2658 if (offset) {
ad69471c 2659 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2660 } else {
ad69471c 2661 gen_uxth(tmp);
9ee6e8bb
PB
2662 }
2663 } else {
2664 if (offset) {
ad69471c 2665 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2666 } else {
ad69471c 2667 gen_sxth(tmp);
9ee6e8bb
PB
2668 }
2669 }
2670 break;
2671 case 2:
9ee6e8bb
PB
2672 break;
2673 }
ad69471c 2674 store_reg(s, rd, tmp);
b7bcbe95
FB
2675 } else {
2676 /* arm->vfp */
ad69471c 2677 tmp = load_reg(s, rd);
9ee6e8bb
PB
2678 if (insn & (1 << 23)) {
2679 /* VDUP */
2680 if (size == 0) {
ad69471c 2681 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2682 } else if (size == 1) {
ad69471c 2683 gen_neon_dup_low16(tmp);
9ee6e8bb 2684 }
cbbccffc 2685 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2686 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2687 tcg_gen_mov_i32(tmp2, tmp);
2688 neon_store_reg(rn, n, tmp2);
2689 }
2690 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2691 } else {
2692 /* VMOV */
2693 switch (size) {
2694 case 0:
ad69471c 2695 tmp2 = neon_load_reg(rn, pass);
d593c48e 2696 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2697 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2698 break;
2699 case 1:
ad69471c 2700 tmp2 = neon_load_reg(rn, pass);
d593c48e 2701 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2702 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2703 break;
2704 case 2:
9ee6e8bb
PB
2705 break;
2706 }
ad69471c 2707 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2708 }
b7bcbe95 2709 }
9ee6e8bb
PB
2710 } else { /* !dp */
2711 if ((insn & 0x6f) != 0x00)
2712 return 1;
2713 rn = VFP_SREG_N(insn);
18c9b560 2714 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2715 /* vfp->arm */
2716 if (insn & (1 << 21)) {
2717 /* system register */
40f137e1 2718 rn >>= 1;
9ee6e8bb 2719
b7bcbe95 2720 switch (rn) {
40f137e1 2721 case ARM_VFP_FPSID:
4373f3ce 2722 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2723 VFP3 restricts all id registers to privileged
2724 accesses. */
2725 if (IS_USER(s)
2726 && arm_feature(env, ARM_FEATURE_VFP3))
2727 return 1;
4373f3ce 2728 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2729 break;
40f137e1 2730 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2731 if (IS_USER(s))
2732 return 1;
4373f3ce 2733 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2734 break;
40f137e1
PB
2735 case ARM_VFP_FPINST:
2736 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2737 /* Not present in VFP3. */
2738 if (IS_USER(s)
2739 || arm_feature(env, ARM_FEATURE_VFP3))
2740 return 1;
4373f3ce 2741 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2742 break;
40f137e1 2743 case ARM_VFP_FPSCR:
601d70b9 2744 if (rd == 15) {
4373f3ce
PB
2745 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2746 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2747 } else {
7d1b0095 2748 tmp = tcg_temp_new_i32();
4373f3ce
PB
2749 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2750 }
b7bcbe95 2751 break;
9ee6e8bb
PB
2752 case ARM_VFP_MVFR0:
2753 case ARM_VFP_MVFR1:
2754 if (IS_USER(s)
06ed5d66 2755 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2756 return 1;
4373f3ce 2757 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2758 break;
b7bcbe95
FB
2759 default:
2760 return 1;
2761 }
2762 } else {
2763 gen_mov_F0_vreg(0, rn);
4373f3ce 2764 tmp = gen_vfp_mrs();
b7bcbe95
FB
2765 }
2766 if (rd == 15) {
b5ff1b31 2767 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2768 gen_set_nzcv(tmp);
7d1b0095 2769 tcg_temp_free_i32(tmp);
4373f3ce
PB
2770 } else {
2771 store_reg(s, rd, tmp);
2772 }
b7bcbe95
FB
2773 } else {
2774 /* arm->vfp */
b7bcbe95 2775 if (insn & (1 << 21)) {
40f137e1 2776 rn >>= 1;
b7bcbe95
FB
2777 /* system register */
2778 switch (rn) {
40f137e1 2779 case ARM_VFP_FPSID:
9ee6e8bb
PB
2780 case ARM_VFP_MVFR0:
2781 case ARM_VFP_MVFR1:
b7bcbe95
FB
2782 /* Writes are ignored. */
2783 break;
40f137e1 2784 case ARM_VFP_FPSCR:
e4c1cfa5 2785 tmp = load_reg(s, rd);
4373f3ce 2786 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2787 tcg_temp_free_i32(tmp);
b5ff1b31 2788 gen_lookup_tb(s);
b7bcbe95 2789 break;
40f137e1 2790 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2791 if (IS_USER(s))
2792 return 1;
71b3c3de
JR
2793 /* TODO: VFP subarchitecture support.
2794 * For now, keep the EN bit only */
e4c1cfa5 2795 tmp = load_reg(s, rd);
71b3c3de 2796 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2797 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2798 gen_lookup_tb(s);
2799 break;
2800 case ARM_VFP_FPINST:
2801 case ARM_VFP_FPINST2:
e4c1cfa5 2802 tmp = load_reg(s, rd);
4373f3ce 2803 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2804 break;
b7bcbe95
FB
2805 default:
2806 return 1;
2807 }
2808 } else {
e4c1cfa5 2809 tmp = load_reg(s, rd);
4373f3ce 2810 gen_vfp_msr(tmp);
b7bcbe95
FB
2811 gen_mov_vreg_F0(0, rn);
2812 }
2813 }
2814 }
2815 } else {
2816 /* data processing */
2817 /* The opcode is in bits 23, 21, 20 and 6. */
2818 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2819 if (dp) {
2820 if (op == 15) {
2821 /* rn is opcode */
2822 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2823 } else {
2824 /* rn is register number */
9ee6e8bb 2825 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2826 }
2827
04595bf6 2828 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2829 /* Integer or single precision destination. */
9ee6e8bb 2830 rd = VFP_SREG_D(insn);
b7bcbe95 2831 } else {
9ee6e8bb 2832 VFP_DREG_D(rd, insn);
b7bcbe95 2833 }
04595bf6
PM
2834 if (op == 15 &&
2835 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2836 /* VCVT from int is always from S reg regardless of dp bit.
2837 * VCVT with immediate frac_bits has same format as SREG_M
2838 */
2839 rm = VFP_SREG_M(insn);
b7bcbe95 2840 } else {
9ee6e8bb 2841 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2842 }
2843 } else {
9ee6e8bb 2844 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2845 if (op == 15 && rn == 15) {
2846 /* Double precision destination. */
9ee6e8bb
PB
2847 VFP_DREG_D(rd, insn);
2848 } else {
2849 rd = VFP_SREG_D(insn);
2850 }
04595bf6
PM
2851 /* NB that we implicitly rely on the encoding for the frac_bits
2852 * in VCVT of fixed to float being the same as that of an SREG_M
2853 */
9ee6e8bb 2854 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2855 }
2856
69d1fc22 2857 veclen = s->vec_len;
b7bcbe95
FB
2858 if (op == 15 && rn > 3)
2859 veclen = 0;
2860
2861 /* Shut up compiler warnings. */
2862 delta_m = 0;
2863 delta_d = 0;
2864 bank_mask = 0;
3b46e624 2865
b7bcbe95
FB
2866 if (veclen > 0) {
2867 if (dp)
2868 bank_mask = 0xc;
2869 else
2870 bank_mask = 0x18;
2871
2872 /* Figure out what type of vector operation this is. */
2873 if ((rd & bank_mask) == 0) {
2874 /* scalar */
2875 veclen = 0;
2876 } else {
2877 if (dp)
69d1fc22 2878 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2879 else
69d1fc22 2880 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2881
2882 if ((rm & bank_mask) == 0) {
2883 /* mixed scalar/vector */
2884 delta_m = 0;
2885 } else {
2886 /* vector */
2887 delta_m = delta_d;
2888 }
2889 }
2890 }
2891
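/* A sketch of the short-vector setup above: for single precision bank_mask
 * is 0x18, so S0-S31 are treated as four banks of eight.  A destination in
 * the first bank forces a scalar operation (veclen = 0); otherwise the loop
 * below advances rd by delta_d = vec_stride + 1 using
 * ((rd + delta_d) & (bank_mask - 1)) | (rd & bank_mask), and with delta_m == 0
 * (mixed scalar/vector) rm stays fixed while rd iterates.
 */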
2892 /* Load the initial operands. */
2893 if (op == 15) {
2894 switch (rn) {
2895 case 16:
2896 case 17:
2897 /* Integer source */
2898 gen_mov_F0_vreg(0, rm);
2899 break;
2900 case 8:
2901 case 9:
2902 /* Compare */
2903 gen_mov_F0_vreg(dp, rd);
2904 gen_mov_F1_vreg(dp, rm);
2905 break;
2906 case 10:
2907 case 11:
2908 /* Compare with zero */
2909 gen_mov_F0_vreg(dp, rd);
2910 gen_vfp_F1_ld0(dp);
2911 break;
9ee6e8bb
PB
2912 case 20:
2913 case 21:
2914 case 22:
2915 case 23:
644ad806
PB
2916 case 28:
2917 case 29:
2918 case 30:
2919 case 31:
9ee6e8bb
PB
2920 /* Source and destination the same. */
2921 gen_mov_F0_vreg(dp, rd);
2922 break;
6e0c0ed1
PM
2923 case 4:
2924 case 5:
2925 case 6:
2926 case 7:
2927 /* VCVTB, VCVTT: only present with the halfprec extension,
2928 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2929 */
2930 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2931 return 1;
2932 }
2933 /* Otherwise fall through */
b7bcbe95
FB
2934 default:
2935 /* One source operand. */
2936 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2937 break;
b7bcbe95
FB
2938 }
2939 } else {
2940 /* Two source operands. */
2941 gen_mov_F0_vreg(dp, rn);
2942 gen_mov_F1_vreg(dp, rm);
2943 }
2944
2945 for (;;) {
2946 /* Perform the calculation. */
2947 switch (op) {
605a6aed
PM
2948 case 0: /* VMLA: fd + (fn * fm) */
2949 /* Note that order of inputs to the add matters for NaNs */
2950 gen_vfp_F1_mul(dp);
2951 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2952 gen_vfp_add(dp);
2953 break;
605a6aed 2954 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2955 gen_vfp_mul(dp);
605a6aed
PM
2956 gen_vfp_F1_neg(dp);
2957 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2958 gen_vfp_add(dp);
2959 break;
605a6aed
PM
2960 case 2: /* VNMLS: -fd + (fn * fm) */
2961 /* Note that it isn't valid to replace (-A + B) with (B - A)
2962 * or similar plausible looking simplifications
2963 * because this will give wrong results for NaNs.
2964 */
2965 gen_vfp_F1_mul(dp);
2966 gen_mov_F0_vreg(dp, rd);
2967 gen_vfp_neg(dp);
2968 gen_vfp_add(dp);
b7bcbe95 2969 break;
605a6aed 2970 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2971 gen_vfp_mul(dp);
605a6aed
PM
2972 gen_vfp_F1_neg(dp);
2973 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2974 gen_vfp_neg(dp);
605a6aed 2975 gen_vfp_add(dp);
b7bcbe95
FB
2976 break;
2977 case 4: /* mul: fn * fm */
2978 gen_vfp_mul(dp);
2979 break;
2980 case 5: /* nmul: -(fn * fm) */
2981 gen_vfp_mul(dp);
2982 gen_vfp_neg(dp);
2983 break;
2984 case 6: /* add: fn + fm */
2985 gen_vfp_add(dp);
2986 break;
2987 case 7: /* sub: fn - fm */
2988 gen_vfp_sub(dp);
2989 break;
2990 case 8: /* div: fn / fm */
2991 gen_vfp_div(dp);
2992 break;
da97f52c
PM
2993 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
2994 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
2995 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
2996 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
2997 /* These are fused multiply-add, and must be done as one
2998 * floating point operation with no rounding between the
2999 * multiplication and addition steps.
3000 * NB that doing the negations here as separate steps is
3001 * correct: an input NaN should come out with its sign bit
3002 * flipped if it is a negated input.
3003 */
3004 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3005 return 1;
3006 }
3007 if (dp) {
3008 TCGv_ptr fpst;
3009 TCGv_i64 frd;
3010 if (op & 1) {
3011 /* VFNMS, VFMS */
3012 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3013 }
3014 frd = tcg_temp_new_i64();
3015 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3016 if (op & 2) {
3017 /* VFNMA, VFNMS */
3018 gen_helper_vfp_negd(frd, frd);
3019 }
3020 fpst = get_fpstatus_ptr(0);
3021 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3022 cpu_F1d, frd, fpst);
3023 tcg_temp_free_ptr(fpst);
3024 tcg_temp_free_i64(frd);
3025 } else {
3026 TCGv_ptr fpst;
3027 TCGv_i32 frd;
3028 if (op & 1) {
3029 /* VFNMS, VFMS */
3030 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3031 }
3032 frd = tcg_temp_new_i32();
3033 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3034 if (op & 2) {
3035 gen_helper_vfp_negs(frd, frd);
3036 }
3037 fpst = get_fpstatus_ptr(0);
3038 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3039 cpu_F1s, frd, fpst);
3040 tcg_temp_free_ptr(fpst);
3041 tcg_temp_free_i32(frd);
3042 }
3043 break;
9ee6e8bb
PB
3044 case 14: /* fconst */
3045 if (!arm_feature(env, ARM_FEATURE_VFP3))
3046 return 1;
3047
3048 n = (insn << 12) & 0x80000000;
3049 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3050 if (dp) {
3051 if (i & 0x40)
3052 i |= 0x3f80;
3053 else
3054 i |= 0x4000;
3055 n |= i << 16;
4373f3ce 3056 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3057 } else {
3058 if (i & 0x40)
3059 i |= 0x780;
3060 else
3061 i |= 0x800;
3062 n |= i << 19;
5b340b51 3063 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3064 }
9ee6e8bb 3065 break;
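/* A worked example of the fconst expansion above, assuming the standard VFP
 * immediate encoding: imm8 = 0x70 gives i = 0x70 with no sign bit.  The
 * single-precision path sets i |= 0x780 -> 0x7f0, so n = 0x7f0 << 19
 * = 0x3f800000 (1.0f); the double-precision path sets i |= 0x3f80 -> 0x3ff0,
 * so n = 0x3ff00000 and the value becomes 0x3ff0000000000000 (1.0) after the
 * shift into the high word.
 */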
b7bcbe95
FB
3066 case 15: /* extension space */
3067 switch (rn) {
3068 case 0: /* cpy */
3069 /* no-op */
3070 break;
3071 case 1: /* abs */
3072 gen_vfp_abs(dp);
3073 break;
3074 case 2: /* neg */
3075 gen_vfp_neg(dp);
3076 break;
3077 case 3: /* sqrt */
3078 gen_vfp_sqrt(dp);
3079 break;
60011498 3080 case 4: /* vcvtb.f32.f16 */
60011498
PB
3081 tmp = gen_vfp_mrs();
3082 tcg_gen_ext16u_i32(tmp, tmp);
3083 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3084 tcg_temp_free_i32(tmp);
60011498
PB
3085 break;
3086 case 5: /* vcvtt.f32.f16 */
60011498
PB
3087 tmp = gen_vfp_mrs();
3088 tcg_gen_shri_i32(tmp, tmp, 16);
3089 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3090 tcg_temp_free_i32(tmp);
60011498
PB
3091 break;
3092 case 6: /* vcvtb.f16.f32 */
7d1b0095 3093 tmp = tcg_temp_new_i32();
60011498
PB
3094 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3095 gen_mov_F0_vreg(0, rd);
3096 tmp2 = gen_vfp_mrs();
3097 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3098 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3099 tcg_temp_free_i32(tmp2);
60011498
PB
3100 gen_vfp_msr(tmp);
3101 break;
3102 case 7: /* vcvtt.f16.f32 */
7d1b0095 3103 tmp = tcg_temp_new_i32();
60011498
PB
3104 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3105 tcg_gen_shli_i32(tmp, tmp, 16);
3106 gen_mov_F0_vreg(0, rd);
3107 tmp2 = gen_vfp_mrs();
3108 tcg_gen_ext16u_i32(tmp2, tmp2);
3109 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3110 tcg_temp_free_i32(tmp2);
60011498
PB
3111 gen_vfp_msr(tmp);
3112 break;
b7bcbe95
FB
3113 case 8: /* cmp */
3114 gen_vfp_cmp(dp);
3115 break;
3116 case 9: /* cmpe */
3117 gen_vfp_cmpe(dp);
3118 break;
3119 case 10: /* cmpz */
3120 gen_vfp_cmp(dp);
3121 break;
3122 case 11: /* cmpez */
3123 gen_vfp_F1_ld0(dp);
3124 gen_vfp_cmpe(dp);
3125 break;
3126 case 15: /* single<->double conversion */
3127 if (dp)
4373f3ce 3128 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3129 else
4373f3ce 3130 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3131 break;
3132 case 16: /* fuito */
5500b06c 3133 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3134 break;
3135 case 17: /* fsito */
5500b06c 3136 gen_vfp_sito(dp, 0);
b7bcbe95 3137 break;
9ee6e8bb
PB
3138 case 20: /* fshto */
3139 if (!arm_feature(env, ARM_FEATURE_VFP3))
3140 return 1;
5500b06c 3141 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3142 break;
3143 case 21: /* fslto */
3144 if (!arm_feature(env, ARM_FEATURE_VFP3))
3145 return 1;
5500b06c 3146 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3147 break;
3148 case 22: /* fuhto */
3149 if (!arm_feature(env, ARM_FEATURE_VFP3))
3150 return 1;
5500b06c 3151 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3152 break;
3153 case 23: /* fulto */
3154 if (!arm_feature(env, ARM_FEATURE_VFP3))
3155 return 1;
5500b06c 3156 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3157 break;
b7bcbe95 3158 case 24: /* ftoui */
5500b06c 3159 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3160 break;
3161 case 25: /* ftouiz */
5500b06c 3162 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3163 break;
3164 case 26: /* ftosi */
5500b06c 3165 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3166 break;
3167 case 27: /* ftosiz */
5500b06c 3168 gen_vfp_tosiz(dp, 0);
b7bcbe95 3169 break;
9ee6e8bb
PB
3170 case 28: /* ftosh */
3171 if (!arm_feature(env, ARM_FEATURE_VFP3))
3172 return 1;
5500b06c 3173 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3174 break;
3175 case 29: /* ftosl */
3176 if (!arm_feature(env, ARM_FEATURE_VFP3))
3177 return 1;
5500b06c 3178 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3179 break;
3180 case 30: /* ftouh */
3181 if (!arm_feature(env, ARM_FEATURE_VFP3))
3182 return 1;
5500b06c 3183 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3184 break;
3185 case 31: /* ftoul */
3186 if (!arm_feature(env, ARM_FEATURE_VFP3))
3187 return 1;
5500b06c 3188 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3189 break;
b7bcbe95 3190 default: /* undefined */
b7bcbe95
FB
3191 return 1;
3192 }
3193 break;
3194 default: /* undefined */
b7bcbe95
FB
3195 return 1;
3196 }
3197
3198 /* Write back the result. */
3199 if (op == 15 && (rn >= 8 && rn <= 11))
3200 ; /* Comparison, do nothing. */
04595bf6
PM
3201 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3202 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3203 gen_mov_vreg_F0(0, rd);
3204 else if (op == 15 && rn == 15)
3205 /* conversion */
3206 gen_mov_vreg_F0(!dp, rd);
3207 else
3208 gen_mov_vreg_F0(dp, rd);
3209
3210 /* break out of the loop if we have finished */
3211 if (veclen == 0)
3212 break;
3213
3214 if (op == 15 && delta_m == 0) {
3215 /* single source one-many */
3216 while (veclen--) {
3217 rd = ((rd + delta_d) & (bank_mask - 1))
3218 | (rd & bank_mask);
3219 gen_mov_vreg_F0(dp, rd);
3220 }
3221 break;
3222 }
3223 /* Set up the next operands. */
3224 veclen--;
3225 rd = ((rd + delta_d) & (bank_mask - 1))
3226 | (rd & bank_mask);
3227
3228 if (op == 15) {
3229 /* One source operand. */
3230 rm = ((rm + delta_m) & (bank_mask - 1))
3231 | (rm & bank_mask);
3232 gen_mov_F0_vreg(dp, rm);
3233 } else {
3234 /* Two source operands. */
3235 rn = ((rn + delta_d) & (bank_mask - 1))
3236 | (rn & bank_mask);
3237 gen_mov_F0_vreg(dp, rn);
3238 if (delta_m) {
3239 rm = ((rm + delta_m) & (bank_mask - 1))
3240 | (rm & bank_mask);
3241 gen_mov_F1_vreg(dp, rm);
3242 }
3243 }
3244 }
3245 }
3246 break;
3247 case 0xc:
3248 case 0xd:
8387da81 3249 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3250 /* two-register transfer */
3251 rn = (insn >> 16) & 0xf;
3252 rd = (insn >> 12) & 0xf;
3253 if (dp) {
9ee6e8bb
PB
3254 VFP_DREG_M(rm, insn);
3255 } else {
3256 rm = VFP_SREG_M(insn);
3257 }
b7bcbe95 3258
18c9b560 3259 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3260 /* vfp->arm */
3261 if (dp) {
4373f3ce
PB
3262 gen_mov_F0_vreg(0, rm * 2);
3263 tmp = gen_vfp_mrs();
3264 store_reg(s, rd, tmp);
3265 gen_mov_F0_vreg(0, rm * 2 + 1);
3266 tmp = gen_vfp_mrs();
3267 store_reg(s, rn, tmp);
b7bcbe95
FB
3268 } else {
3269 gen_mov_F0_vreg(0, rm);
4373f3ce 3270 tmp = gen_vfp_mrs();
8387da81 3271 store_reg(s, rd, tmp);
b7bcbe95 3272 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3273 tmp = gen_vfp_mrs();
8387da81 3274 store_reg(s, rn, tmp);
b7bcbe95
FB
3275 }
3276 } else {
3277 /* arm->vfp */
3278 if (dp) {
4373f3ce
PB
3279 tmp = load_reg(s, rd);
3280 gen_vfp_msr(tmp);
3281 gen_mov_vreg_F0(0, rm * 2);
3282 tmp = load_reg(s, rn);
3283 gen_vfp_msr(tmp);
3284 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3285 } else {
8387da81 3286 tmp = load_reg(s, rd);
4373f3ce 3287 gen_vfp_msr(tmp);
b7bcbe95 3288 gen_mov_vreg_F0(0, rm);
8387da81 3289 tmp = load_reg(s, rn);
4373f3ce 3290 gen_vfp_msr(tmp);
b7bcbe95
FB
3291 gen_mov_vreg_F0(0, rm + 1);
3292 }
3293 }
3294 } else {
3295 /* Load/store */
3296 rn = (insn >> 16) & 0xf;
3297 if (dp)
9ee6e8bb 3298 VFP_DREG_D(rd, insn);
b7bcbe95 3299 else
9ee6e8bb 3300 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3301 if ((insn & 0x01200000) == 0x01000000) {
3302 /* Single load/store */
3303 offset = (insn & 0xff) << 2;
3304 if ((insn & (1 << 23)) == 0)
3305 offset = -offset;
934814f1
PM
3306 if (s->thumb && rn == 15) {
3307 /* This is actually UNPREDICTABLE */
3308 addr = tcg_temp_new_i32();
3309 tcg_gen_movi_i32(addr, s->pc & ~2);
3310 } else {
3311 addr = load_reg(s, rn);
3312 }
312eea9f 3313 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3314 if (insn & (1 << 20)) {
312eea9f 3315 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3316 gen_mov_vreg_F0(dp, rd);
3317 } else {
3318 gen_mov_F0_vreg(dp, rd);
312eea9f 3319 gen_vfp_st(s, dp, addr);
b7bcbe95 3320 }
7d1b0095 3321 tcg_temp_free_i32(addr);
b7bcbe95
FB
3322 } else {
3323 /* load/store multiple */
934814f1 3324 int w = insn & (1 << 21);
b7bcbe95
FB
3325 if (dp)
3326 n = (insn >> 1) & 0x7f;
3327 else
3328 n = insn & 0xff;
3329
934814f1
PM
3330 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3331 /* P == U , W == 1 => UNDEF */
3332 return 1;
3333 }
3334 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3335 /* UNPREDICTABLE cases for bad immediates: we choose to
3336 * UNDEF to avoid generating huge numbers of TCG ops
3337 */
3338 return 1;
3339 }
3340 if (rn == 15 && w) {
3341 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3342 return 1;
3343 }
3344
3345 if (s->thumb && rn == 15) {
3346 /* This is actually UNPREDICTABLE */
3347 addr = tcg_temp_new_i32();
3348 tcg_gen_movi_i32(addr, s->pc & ~2);
3349 } else {
3350 addr = load_reg(s, rn);
3351 }
b7bcbe95 3352 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3353 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3354
3355 if (dp)
3356 offset = 8;
3357 else
3358 offset = 4;
3359 for (i = 0; i < n; i++) {
18c9b560 3360 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3361 /* load */
312eea9f 3362 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3363 gen_mov_vreg_F0(dp, rd + i);
3364 } else {
3365 /* store */
3366 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3367 gen_vfp_st(s, dp, addr);
b7bcbe95 3368 }
312eea9f 3369 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3370 }
934814f1 3371 if (w) {
b7bcbe95
FB
3372 /* writeback */
3373 if (insn & (1 << 24))
3374 offset = -offset * n;
3375 else if (dp && (insn & 1))
3376 offset = 4;
3377 else
3378 offset = 0;
3379
3380 if (offset != 0)
312eea9f
FN
3381 tcg_gen_addi_i32(addr, addr, offset);
3382 store_reg(s, rn, addr);
3383 } else {
7d1b0095 3384 tcg_temp_free_i32(addr);
b7bcbe95
FB
3385 }
3386 }
3387 }
3388 break;
3389 default:
3390 /* Should never happen. */
3391 return 1;
3392 }
3393 return 0;
3394}
3395
6e256c93 3396static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3397{
6e256c93
FB
3398 TranslationBlock *tb;
3399
3400 tb = s->tb;
3401 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3402 tcg_gen_goto_tb(n);
8984bd2e 3403 gen_set_pc_im(dest);
4b4a72e5 3404 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3405 } else {
8984bd2e 3406 gen_set_pc_im(dest);
57fec1fe 3407 tcg_gen_exit_tb(0);
6e256c93 3408 }
c53be334
FB
3409}
3410
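/* Note on the two exits above: when the destination lies in the same guest
 * page as the current TB, the jump is chained with tcg_gen_goto_tb() and
 * tcg_gen_exit_tb(tb + n) so the target block can later be patched in
 * directly; otherwise the PC is written back and exit_tb(0) returns to the
 * main loop for a full TB lookup.
 */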
8aaca4c0
FB
3411static inline void gen_jmp (DisasContext *s, uint32_t dest)
3412{
551bd27f 3413 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3414 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3415 if (s->thumb)
d9ba4830
PB
3416 dest |= 1;
3417 gen_bx_im(s, dest);
8aaca4c0 3418 } else {
6e256c93 3419 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3420 s->is_jmp = DISAS_TB_JUMP;
3421 }
3422}
3423
39d5492a 3424static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3425{
ee097184 3426 if (x)
d9ba4830 3427 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3428 else
d9ba4830 3429 gen_sxth(t0);
ee097184 3430 if (y)
d9ba4830 3431 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3432 else
d9ba4830
PB
3433 gen_sxth(t1);
3434 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3435}
3436
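/* gen_mulxy() implements the 16x16->32 signed multiply used by the
 * SMULxy-style instructions: x and y select the top (1) or bottom (0)
 * halfword of t0 and t1, sign-extended before the multiply.  For example,
 * t0 = 0xffff0002, t1 = 0x00030004 with x = 0, y = 1 multiplies 2 * 3 = 6.
 */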
3437/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3438static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3439 uint32_t mask;
3440
3441 mask = 0;
3442 if (flags & (1 << 0))
3443 mask |= 0xff;
3444 if (flags & (1 << 1))
3445 mask |= 0xff00;
3446 if (flags & (1 << 2))
3447 mask |= 0xff0000;
3448 if (flags & (1 << 3))
3449 mask |= 0xff000000;
9ee6e8bb 3450
2ae23e75 3451 /* Mask out undefined bits. */
9ee6e8bb 3452 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3453 if (!arm_feature(env, ARM_FEATURE_V4T))
3454 mask &= ~CPSR_T;
3455 if (!arm_feature(env, ARM_FEATURE_V5))
3456 mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3457 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3458 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3459 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3460 mask &= ~CPSR_IT;
9ee6e8bb 3461 /* Mask out execution state bits. */
2ae23e75 3462 if (!spsr)
e160c51c 3463 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3464 /* Mask out privileged bits. */
3465 if (IS_USER(s))
9ee6e8bb 3466 mask &= CPSR_USER;
b5ff1b31
FB
3467 return mask;
3468}
3469
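/* Example for msr_mask() above: an MSR field mask of 0x9 (the 'f' and 'c'
 * fields) starts from 0xff000000 | 0x000000ff = 0xff0000ff; the feature
 * checks then clear bits the core does not implement, and in user mode the
 * result is further restricted to CPSR_USER.
 */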
2fbac54b 3470/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3471static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3472{
39d5492a 3473 TCGv_i32 tmp;
b5ff1b31
FB
3474 if (spsr) {
3475 /* ??? This is also undefined in system mode. */
3476 if (IS_USER(s))
3477 return 1;
d9ba4830
PB
3478
3479 tmp = load_cpu_field(spsr);
3480 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3481 tcg_gen_andi_i32(t0, t0, mask);
3482 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3483 store_cpu_field(tmp, spsr);
b5ff1b31 3484 } else {
2fbac54b 3485 gen_set_cpsr(t0, mask);
b5ff1b31 3486 }
7d1b0095 3487 tcg_temp_free_i32(t0);
b5ff1b31
FB
3488 gen_lookup_tb(s);
3489 return 0;
3490}
3491
2fbac54b
FN
3492/* Returns nonzero if access to the PSR is not permitted. */
3493static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3494{
39d5492a 3495 TCGv_i32 tmp;
7d1b0095 3496 tmp = tcg_temp_new_i32();
2fbac54b
FN
3497 tcg_gen_movi_i32(tmp, val);
3498 return gen_set_psr(s, mask, spsr, tmp);
3499}
3500
e9bb4aa9 3501/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3502static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3503{
39d5492a 3504 TCGv_i32 tmp;
e9bb4aa9 3505 store_reg(s, 15, pc);
d9ba4830
PB
3506 tmp = load_cpu_field(spsr);
3507 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3508 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3509 s->is_jmp = DISAS_UPDATE;
3510}
3511
b0109805 3512/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3513static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3514{
b0109805 3515 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3516 tcg_temp_free_i32(cpsr);
b0109805 3517 store_reg(s, 15, pc);
9ee6e8bb
PB
3518 s->is_jmp = DISAS_UPDATE;
3519}
3b46e624 3520
9ee6e8bb
PB
3521static inline void
3522gen_set_condexec (DisasContext *s)
3523{
3524 if (s->condexec_mask) {
8f01245e 3525 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3526 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3527 tcg_gen_movi_i32(tmp, val);
d9ba4830 3528 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3529 }
3530}
3b46e624 3531
bc4a0de0
PM
3532static void gen_exception_insn(DisasContext *s, int offset, int excp)
3533{
3534 gen_set_condexec(s);
3535 gen_set_pc_im(s->pc - offset);
3536 gen_exception(excp);
3537 s->is_jmp = DISAS_JUMP;
3538}
3539
9ee6e8bb
PB
3540static void gen_nop_hint(DisasContext *s, int val)
3541{
3542 switch (val) {
3543 case 3: /* wfi */
8984bd2e 3544 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3545 s->is_jmp = DISAS_WFI;
3546 break;
3547 case 2: /* wfe */
3548 case 4: /* sev */
3549 /* TODO: Implement SEV and WFE. May help SMP performance. */
3550 default: /* nop */
3551 break;
3552 }
3553}
99c475ab 3554
ad69471c 3555#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3556
39d5492a 3557static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3558{
3559 switch (size) {
dd8fbd78
FN
3560 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3561 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3562 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3563 default: abort();
9ee6e8bb 3564 }
9ee6e8bb
PB
3565}
3566
39d5492a 3567static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3568{
3569 switch (size) {
dd8fbd78
FN
3570 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3571 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3572 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3573 default: return;
3574 }
3575}
3576
3577/* 32-bit pairwise ops end up the same as the elementwise versions. */
3578#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3579#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3580#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3581#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3582
ad69471c
PB
3583#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3584 switch ((size << 1) | u) { \
3585 case 0: \
dd8fbd78 3586 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3587 break; \
3588 case 1: \
dd8fbd78 3589 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3590 break; \
3591 case 2: \
dd8fbd78 3592 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3593 break; \
3594 case 3: \
dd8fbd78 3595 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3596 break; \
3597 case 4: \
dd8fbd78 3598 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3599 break; \
3600 case 5: \
dd8fbd78 3601 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3602 break; \
3603 default: return 1; \
3604 }} while (0)
9ee6e8bb
PB
3605
3606#define GEN_NEON_INTEGER_OP(name) do { \
3607 switch ((size << 1) | u) { \
ad69471c 3608 case 0: \
dd8fbd78 3609 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3610 break; \
3611 case 1: \
dd8fbd78 3612 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3613 break; \
3614 case 2: \
dd8fbd78 3615 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3616 break; \
3617 case 3: \
dd8fbd78 3618 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3619 break; \
3620 case 4: \
dd8fbd78 3621 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3622 break; \
3623 case 5: \
dd8fbd78 3624 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3625 break; \
9ee6e8bb
PB
3626 default: return 1; \
3627 }} while (0)

static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}

static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}

static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
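
/* For illustration of the decode above: with size == 1 the incoming 'reg'
 * value packs a D register number in bits [2:0], the 32-bit half of that
 * register in bit 4, and which 16-bit half to duplicate in bit 3; with
 * size == 2 the register is bits [3:0] and the word index is bit 4.
 */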

static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}

static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}

static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
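
/* Worked example for gen_neon_trn_u8() above: with t0 = 0x03020100 and
 * t1 = 0x13121110 on entry (byte lanes 00..03 and 10..13), it produces
 * t0 = 0x02120010 and t1 = 0x03130111, i.e. the byte-lane pairing used
 * when emulating VTRN.8.
 */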

static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}


static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};

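/* For illustration: the table above is indexed by the 'op' (type) field of
 * the NEON "load/store multiple structures" encoding. 'nregs' is the number
 * of D registers transferred, 'interleave' is how many registers one
 * structure's elements are spread across, and 'spacing' is the register
 * number step applied between groups (see 'rd += spacing' below). For
 * example op == 7, {1, 1, 1}, corresponds to the single-register
 * VLD1/VST1 form.
 */
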
3796/* Translate a NEON load/store element instruction. Return nonzero if the
3797 instruction is invalid. */
0ecb72a5 3798static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3799{
3800 int rd, rn, rm;
3801 int op;
3802 int nregs;
3803 int interleave;
84496233 3804 int spacing;
9ee6e8bb
PB
3805 int stride;
3806 int size;
3807 int reg;
3808 int pass;
3809 int load;
3810 int shift;
9ee6e8bb 3811 int n;
39d5492a
PM
3812 TCGv_i32 addr;
3813 TCGv_i32 tmp;
3814 TCGv_i32 tmp2;
84496233 3815 TCGv_i64 tmp64;
9ee6e8bb 3816
5df8bac1 3817 if (!s->vfp_enabled)
9ee6e8bb
PB
3818 return 1;
3819 VFP_DREG_D(rd, insn);
3820 rn = (insn >> 16) & 0xf;
3821 rm = insn & 0xf;
3822 load = (insn & (1 << 21)) != 0;
3823 if ((insn & (1 << 23)) == 0) {
3824 /* Load store all elements. */
3825 op = (insn >> 8) & 0xf;
3826 size = (insn >> 6) & 3;
84496233 3827 if (op > 10)
9ee6e8bb 3828 return 1;
f2dd89d0
PM
3829 /* Catch UNDEF cases for bad values of align field */
3830 switch (op & 0xc) {
3831 case 4:
3832 if (((insn >> 5) & 1) == 1) {
3833 return 1;
3834 }
3835 break;
3836 case 8:
3837 if (((insn >> 4) & 3) == 3) {
3838 return 1;
3839 }
3840 break;
3841 default:
3842 break;
3843 }
9ee6e8bb
PB
3844 nregs = neon_ls_element_type[op].nregs;
3845 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3846 spacing = neon_ls_element_type[op].spacing;
3847 if (size == 3 && (interleave | spacing) != 1)
3848 return 1;
e318a60b 3849 addr = tcg_temp_new_i32();
dcc65026 3850 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3851 stride = (1 << size) * interleave;
3852 for (reg = 0; reg < nregs; reg++) {
3853 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3854 load_reg_var(s, addr, rn);
3855 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3856 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3857 load_reg_var(s, addr, rn);
3858 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3859 }
84496233 3860 if (size == 3) {
8ed1237d 3861 tmp64 = tcg_temp_new_i64();
84496233 3862 if (load) {
8ed1237d 3863 tcg_gen_qemu_ld64(tmp64, addr, IS_USER(s));
84496233 3864 neon_store_reg64(tmp64, rd);
84496233 3865 } else {
84496233 3866 neon_load_reg64(tmp64, rd);
8ed1237d 3867 tcg_gen_qemu_st64(tmp64, addr, IS_USER(s));
84496233 3868 }
8ed1237d 3869 tcg_temp_free_i64(tmp64);
84496233
JR
3870 tcg_gen_addi_i32(addr, addr, stride);
3871 } else {
3872 for (pass = 0; pass < 2; pass++) {
3873 if (size == 2) {
3874 if (load) {
58ab8e96
PM
3875 tmp = tcg_temp_new_i32();
3876 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
84496233
JR
3877 neon_store_reg(rd, pass, tmp);
3878 } else {
3879 tmp = neon_load_reg(rd, pass);
58ab8e96
PM
3880 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
3881 tcg_temp_free_i32(tmp);
84496233 3882 }
1b2b1e54 3883 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3884 } else if (size == 1) {
3885 if (load) {
58ab8e96
PM
3886 tmp = tcg_temp_new_i32();
3887 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
84496233 3888 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96
PM
3889 tmp2 = tcg_temp_new_i32();
3890 tcg_gen_qemu_ld16u(tmp2, addr, IS_USER(s));
84496233 3891 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3892 tcg_gen_shli_i32(tmp2, tmp2, 16);
3893 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3894 tcg_temp_free_i32(tmp2);
84496233
JR
3895 neon_store_reg(rd, pass, tmp);
3896 } else {
3897 tmp = neon_load_reg(rd, pass);
7d1b0095 3898 tmp2 = tcg_temp_new_i32();
84496233 3899 tcg_gen_shri_i32(tmp2, tmp, 16);
58ab8e96
PM
3900 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
3901 tcg_temp_free_i32(tmp);
84496233 3902 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96
PM
3903 tcg_gen_qemu_st16(tmp2, addr, IS_USER(s));
3904 tcg_temp_free_i32(tmp2);
1b2b1e54 3905 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3906 }
84496233
JR
3907 } else /* size == 0 */ {
3908 if (load) {
39d5492a 3909 TCGV_UNUSED_I32(tmp2);
84496233 3910 for (n = 0; n < 4; n++) {
58ab8e96
PM
3911 tmp = tcg_temp_new_i32();
3912 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
84496233
JR
3913 tcg_gen_addi_i32(addr, addr, stride);
3914 if (n == 0) {
3915 tmp2 = tmp;
3916 } else {
41ba8341
PB
3917 tcg_gen_shli_i32(tmp, tmp, n * 8);
3918 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3919 tcg_temp_free_i32(tmp);
84496233 3920 }
9ee6e8bb 3921 }
84496233
JR
3922 neon_store_reg(rd, pass, tmp2);
3923 } else {
3924 tmp2 = neon_load_reg(rd, pass);
3925 for (n = 0; n < 4; n++) {
7d1b0095 3926 tmp = tcg_temp_new_i32();
84496233
JR
3927 if (n == 0) {
3928 tcg_gen_mov_i32(tmp, tmp2);
3929 } else {
3930 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3931 }
58ab8e96
PM
3932 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
3933 tcg_temp_free_i32(tmp);
84496233
JR
3934 tcg_gen_addi_i32(addr, addr, stride);
3935 }
7d1b0095 3936 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3937 }
3938 }
3939 }
3940 }
84496233 3941 rd += spacing;
9ee6e8bb 3942 }
e318a60b 3943 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3944 stride = nregs * 8;
3945 } else {
3946 size = (insn >> 10) & 3;
3947 if (size == 3) {
3948 /* Load single element to all lanes. */
8e18cde3
PM
3949 int a = (insn >> 4) & 1;
3950 if (!load) {
9ee6e8bb 3951 return 1;
8e18cde3 3952 }
9ee6e8bb
PB
3953 size = (insn >> 6) & 3;
3954 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3955
3956 if (size == 3) {
3957 if (nregs != 4 || a == 0) {
9ee6e8bb 3958 return 1;
99c475ab 3959 }
8e18cde3
PM
3960 /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
3961 size = 2;
3962 }
3963 if (nregs == 1 && a == 1 && size == 0) {
3964 return 1;
3965 }
3966 if (nregs == 3 && a == 1) {
3967 return 1;
3968 }
e318a60b 3969 addr = tcg_temp_new_i32();
8e18cde3
PM
3970 load_reg_var(s, addr, rn);
3971 if (nregs == 1) {
3972 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3973 tmp = gen_load_and_replicate(s, addr, size);
3974 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3975 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3976 if (insn & (1 << 5)) {
3977 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3978 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
3979 }
3980 tcg_temp_free_i32(tmp);
3981 } else {
3982 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
3983 stride = (insn & (1 << 5)) ? 2 : 1;
3984 for (reg = 0; reg < nregs; reg++) {
3985 tmp = gen_load_and_replicate(s, addr, size);
3986 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3987 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3988 tcg_temp_free_i32(tmp);
3989 tcg_gen_addi_i32(addr, addr, 1 << size);
3990 rd += stride;
3991 }
9ee6e8bb 3992 }
e318a60b 3993 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3994 stride = (1 << size) * nregs;
3995 } else {
3996 /* Single element. */
93262b16 3997 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
3998 pass = (insn >> 7) & 1;
3999 switch (size) {
4000 case 0:
4001 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4002 stride = 1;
4003 break;
4004 case 1:
4005 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4006 stride = (insn & (1 << 5)) ? 2 : 1;
4007 break;
4008 case 2:
4009 shift = 0;
9ee6e8bb
PB
4010 stride = (insn & (1 << 6)) ? 2 : 1;
4011 break;
4012 default:
4013 abort();
4014 }
4015 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4016 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4017 switch (nregs) {
4018 case 1:
4019 if (((idx & (1 << size)) != 0) ||
4020 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4021 return 1;
4022 }
4023 break;
4024 case 3:
4025 if ((idx & 1) != 0) {
4026 return 1;
4027 }
4028 /* fall through */
4029 case 2:
4030 if (size == 2 && (idx & 2) != 0) {
4031 return 1;
4032 }
4033 break;
4034 case 4:
4035 if ((size == 2) && ((idx & 3) == 3)) {
4036 return 1;
4037 }
4038 break;
4039 default:
4040 abort();
4041 }
4042 if ((rd + stride * (nregs - 1)) > 31) {
4043 /* Attempts to write off the end of the register file
4044 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4045 * the neon_load_reg() would write off the end of the array.
4046 */
4047 return 1;
4048 }
e318a60b 4049 addr = tcg_temp_new_i32();
dcc65026 4050 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4051 for (reg = 0; reg < nregs; reg++) {
4052 if (load) {
58ab8e96 4053 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
4054 switch (size) {
4055 case 0:
58ab8e96 4056 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4057 break;
4058 case 1:
58ab8e96 4059 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4060 break;
4061 case 2:
58ab8e96 4062 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 4063 break;
a50f5b91
PB
4064 default: /* Avoid compiler warnings. */
4065 abort();
9ee6e8bb
PB
4066 }
4067 if (size != 2) {
8f8e3aa4 4068 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
4069 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4070 shift, size ? 16 : 8);
7d1b0095 4071 tcg_temp_free_i32(tmp2);
9ee6e8bb 4072 }
8f8e3aa4 4073 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4074 } else { /* Store */
8f8e3aa4
PB
4075 tmp = neon_load_reg(rd, pass);
4076 if (shift)
4077 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4078 switch (size) {
4079 case 0:
58ab8e96 4080 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4081 break;
4082 case 1:
58ab8e96 4083 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4084 break;
4085 case 2:
58ab8e96 4086 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4087 break;
99c475ab 4088 }
58ab8e96 4089 tcg_temp_free_i32(tmp);
99c475ab 4090 }
9ee6e8bb 4091 rd += stride;
1b2b1e54 4092 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4093 }
e318a60b 4094 tcg_temp_free_i32(addr);
9ee6e8bb 4095 stride = nregs * (1 << size);
99c475ab 4096 }
9ee6e8bb
PB
4097 }
4098 if (rm != 15) {
39d5492a 4099 TCGv_i32 base;
b26eefb6
PB
4100
4101 base = load_reg(s, rn);
9ee6e8bb 4102 if (rm == 13) {
b26eefb6 4103 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4104 } else {
39d5492a 4105 TCGv_i32 index;
b26eefb6
PB
4106 index = load_reg(s, rm);
4107 tcg_gen_add_i32(base, base, index);
7d1b0095 4108 tcg_temp_free_i32(index);
9ee6e8bb 4109 }
b26eefb6 4110 store_reg(s, rn, base);
9ee6e8bb
PB
4111 }
4112 return 0;
4113}

/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}

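/* For illustration: gen_neon_bsl() computes dest = (t & c) | (f & ~c), so
 * with c = 0x0000ffff the result takes its low 16 bits from t and its high
 * 16 bits from f.
 */
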
39d5492a 4123static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4124{
4125 switch (size) {
4126 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4127 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4128 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4129 default: abort();
4130 }
4131}
4132
39d5492a 4133static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4134{
4135 switch (size) {
02da0b2d
PM
4136 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4137 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4138 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4139 default: abort();
4140 }
4141}
4142
39d5492a 4143static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4144{
4145 switch (size) {
02da0b2d
PM
4146 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4147 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4148 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4149 default: abort();
4150 }
4151}
4152
39d5492a 4153static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4154{
4155 switch (size) {
02da0b2d
PM
4156 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4157 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4158 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4159 default: abort();
4160 }
4161}
4162
39d5492a 4163static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4164 int q, int u)
4165{
4166 if (q) {
4167 if (u) {
4168 switch (size) {
4169 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4170 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4171 default: abort();
4172 }
4173 } else {
4174 switch (size) {
4175 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4176 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4177 default: abort();
4178 }
4179 }
4180 } else {
4181 if (u) {
4182 switch (size) {
b408a9b0
CL
4183 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4184 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4185 default: abort();
4186 }
4187 } else {
4188 switch (size) {
4189 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4190 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4191 default: abort();
4192 }
4193 }
4194 }
4195}
4196
39d5492a 4197static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4198{
4199 if (u) {
4200 switch (size) {
4201 case 0: gen_helper_neon_widen_u8(dest, src); break;
4202 case 1: gen_helper_neon_widen_u16(dest, src); break;
4203 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4204 default: abort();
4205 }
4206 } else {
4207 switch (size) {
4208 case 0: gen_helper_neon_widen_s8(dest, src); break;
4209 case 1: gen_helper_neon_widen_s16(dest, src); break;
4210 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4211 default: abort();
4212 }
4213 }
7d1b0095 4214 tcg_temp_free_i32(src);
ad69471c
PB
4215}
4216
4217static inline void gen_neon_addl(int size)
4218{
4219 switch (size) {
4220 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4221 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4222 case 2: tcg_gen_add_i64(CPU_V001); break;
4223 default: abort();
4224 }
4225}
4226
4227static inline void gen_neon_subl(int size)
4228{
4229 switch (size) {
4230 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4231 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4232 case 2: tcg_gen_sub_i64(CPU_V001); break;
4233 default: abort();
4234 }
4235}
4236
a7812ae4 4237static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4238{
4239 switch (size) {
4240 case 0: gen_helper_neon_negl_u16(var, var); break;
4241 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4242 case 2:
4243 tcg_gen_neg_i64(var, var);
4244 break;
ad69471c
PB
4245 default: abort();
4246 }
4247}
4248
a7812ae4 4249static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4250{
4251 switch (size) {
02da0b2d
PM
4252 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4253 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4254 default: abort();
4255 }
4256}

static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}

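/* For illustration: in gen_neon_mull() above, (size << 1) | u selects the
 * widening multiply flavour: 0/1 are the signed/unsigned 8-bit helpers,
 * 2/3 the signed/unsigned 16-bit helpers, and 4/5 use the generic
 * 32x32->64 gen_muls_i64_i32()/gen_mulu_i64_i32() multiplies.
 */
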
39d5492a
PM
4289static void gen_neon_narrow_op(int op, int u, int size,
4290 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4291{
4292 if (op) {
4293 if (u) {
4294 gen_neon_unarrow_sats(size, dest, src);
4295 } else {
4296 gen_neon_narrow(size, dest, src);
4297 }
4298 } else {
4299 if (u) {
4300 gen_neon_narrow_satu(size, dest, src);
4301 } else {
4302 gen_neon_narrow_sats(size, dest, src);
4303 }
4304 }
4305}
4306
4307/* Symbolic constants for op fields for Neon 3-register same-length.
4308 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4309 * table A7-9.
4310 */
4311#define NEON_3R_VHADD 0
4312#define NEON_3R_VQADD 1
4313#define NEON_3R_VRHADD 2
4314#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4315#define NEON_3R_VHSUB 4
4316#define NEON_3R_VQSUB 5
4317#define NEON_3R_VCGT 6
4318#define NEON_3R_VCGE 7
4319#define NEON_3R_VSHL 8
4320#define NEON_3R_VQSHL 9
4321#define NEON_3R_VRSHL 10
4322#define NEON_3R_VQRSHL 11
4323#define NEON_3R_VMAX 12
4324#define NEON_3R_VMIN 13
4325#define NEON_3R_VABD 14
4326#define NEON_3R_VABA 15
4327#define NEON_3R_VADD_VSUB 16
4328#define NEON_3R_VTST_VCEQ 17
4329#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4330#define NEON_3R_VMUL 19
4331#define NEON_3R_VPMAX 20
4332#define NEON_3R_VPMIN 21
4333#define NEON_3R_VQDMULH_VQRDMULH 22
4334#define NEON_3R_VPADD 23
da97f52c 4335#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
62698be3
PM
4336#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4337#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4338#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4339#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4340#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4341#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4342
4343static const uint8_t neon_3r_sizes[] = {
4344 [NEON_3R_VHADD] = 0x7,
4345 [NEON_3R_VQADD] = 0xf,
4346 [NEON_3R_VRHADD] = 0x7,
4347 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4348 [NEON_3R_VHSUB] = 0x7,
4349 [NEON_3R_VQSUB] = 0xf,
4350 [NEON_3R_VCGT] = 0x7,
4351 [NEON_3R_VCGE] = 0x7,
4352 [NEON_3R_VSHL] = 0xf,
4353 [NEON_3R_VQSHL] = 0xf,
4354 [NEON_3R_VRSHL] = 0xf,
4355 [NEON_3R_VQRSHL] = 0xf,
4356 [NEON_3R_VMAX] = 0x7,
4357 [NEON_3R_VMIN] = 0x7,
4358 [NEON_3R_VABD] = 0x7,
4359 [NEON_3R_VABA] = 0x7,
4360 [NEON_3R_VADD_VSUB] = 0xf,
4361 [NEON_3R_VTST_VCEQ] = 0x7,
4362 [NEON_3R_VML] = 0x7,
4363 [NEON_3R_VMUL] = 0x7,
4364 [NEON_3R_VPMAX] = 0x7,
4365 [NEON_3R_VPMIN] = 0x7,
4366 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4367 [NEON_3R_VPADD] = 0x7,
da97f52c 4368 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4369 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4370 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4371 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4372 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4373 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4374 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4375};
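
/* For illustration: the decoder below rejects an encoding when
 * (neon_3r_sizes[op] & (1 << size)) is zero. E.g. NEON_3R_VQDMULH_VQRDMULH
 * is 0x6, so only 16-bit (size 1) and 32-bit (size 2) elements are accepted
 * and the byte-sized form UNDEFs.
 */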
4376
4377/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4378 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4379 * table A7-13.
4380 */
4381#define NEON_2RM_VREV64 0
4382#define NEON_2RM_VREV32 1
4383#define NEON_2RM_VREV16 2
4384#define NEON_2RM_VPADDL 4
4385#define NEON_2RM_VPADDL_U 5
4386#define NEON_2RM_VCLS 8
4387#define NEON_2RM_VCLZ 9
4388#define NEON_2RM_VCNT 10
4389#define NEON_2RM_VMVN 11
4390#define NEON_2RM_VPADAL 12
4391#define NEON_2RM_VPADAL_U 13
4392#define NEON_2RM_VQABS 14
4393#define NEON_2RM_VQNEG 15
4394#define NEON_2RM_VCGT0 16
4395#define NEON_2RM_VCGE0 17
4396#define NEON_2RM_VCEQ0 18
4397#define NEON_2RM_VCLE0 19
4398#define NEON_2RM_VCLT0 20
4399#define NEON_2RM_VABS 22
4400#define NEON_2RM_VNEG 23
4401#define NEON_2RM_VCGT0_F 24
4402#define NEON_2RM_VCGE0_F 25
4403#define NEON_2RM_VCEQ0_F 26
4404#define NEON_2RM_VCLE0_F 27
4405#define NEON_2RM_VCLT0_F 28
4406#define NEON_2RM_VABS_F 30
4407#define NEON_2RM_VNEG_F 31
4408#define NEON_2RM_VSWP 32
4409#define NEON_2RM_VTRN 33
4410#define NEON_2RM_VUZP 34
4411#define NEON_2RM_VZIP 35
4412#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4413#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4414#define NEON_2RM_VSHLL 38
4415#define NEON_2RM_VCVT_F16_F32 44
4416#define NEON_2RM_VCVT_F32_F16 46
4417#define NEON_2RM_VRECPE 56
4418#define NEON_2RM_VRSQRTE 57
4419#define NEON_2RM_VRECPE_F 58
4420#define NEON_2RM_VRSQRTE_F 59
4421#define NEON_2RM_VCVT_FS 60
4422#define NEON_2RM_VCVT_FU 61
4423#define NEON_2RM_VCVT_SF 62
4424#define NEON_2RM_VCVT_UF 63
4425
4426static int neon_2rm_is_float_op(int op)
4427{
4428 /* Return true if this neon 2reg-misc op is float-to-float */
4429 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4430 op >= NEON_2RM_VRECPE_F);
4431}
4432
4433/* Each entry in this array has bit n set if the insn allows
4434 * size value n (otherwise it will UNDEF). Since unallocated
4435 * op values will have no bits set they always UNDEF.
4436 */
4437static const uint8_t neon_2rm_sizes[] = {
4438 [NEON_2RM_VREV64] = 0x7,
4439 [NEON_2RM_VREV32] = 0x3,
4440 [NEON_2RM_VREV16] = 0x1,
4441 [NEON_2RM_VPADDL] = 0x7,
4442 [NEON_2RM_VPADDL_U] = 0x7,
4443 [NEON_2RM_VCLS] = 0x7,
4444 [NEON_2RM_VCLZ] = 0x7,
4445 [NEON_2RM_VCNT] = 0x1,
4446 [NEON_2RM_VMVN] = 0x1,
4447 [NEON_2RM_VPADAL] = 0x7,
4448 [NEON_2RM_VPADAL_U] = 0x7,
4449 [NEON_2RM_VQABS] = 0x7,
4450 [NEON_2RM_VQNEG] = 0x7,
4451 [NEON_2RM_VCGT0] = 0x7,
4452 [NEON_2RM_VCGE0] = 0x7,
4453 [NEON_2RM_VCEQ0] = 0x7,
4454 [NEON_2RM_VCLE0] = 0x7,
4455 [NEON_2RM_VCLT0] = 0x7,
4456 [NEON_2RM_VABS] = 0x7,
4457 [NEON_2RM_VNEG] = 0x7,
4458 [NEON_2RM_VCGT0_F] = 0x4,
4459 [NEON_2RM_VCGE0_F] = 0x4,
4460 [NEON_2RM_VCEQ0_F] = 0x4,
4461 [NEON_2RM_VCLE0_F] = 0x4,
4462 [NEON_2RM_VCLT0_F] = 0x4,
4463 [NEON_2RM_VABS_F] = 0x4,
4464 [NEON_2RM_VNEG_F] = 0x4,
4465 [NEON_2RM_VSWP] = 0x1,
4466 [NEON_2RM_VTRN] = 0x7,
4467 [NEON_2RM_VUZP] = 0x7,
4468 [NEON_2RM_VZIP] = 0x7,
4469 [NEON_2RM_VMOVN] = 0x7,
4470 [NEON_2RM_VQMOVN] = 0x7,
4471 [NEON_2RM_VSHLL] = 0x7,
4472 [NEON_2RM_VCVT_F16_F32] = 0x2,
4473 [NEON_2RM_VCVT_F32_F16] = 0x2,
4474 [NEON_2RM_VRECPE] = 0x4,
4475 [NEON_2RM_VRSQRTE] = 0x4,
4476 [NEON_2RM_VRECPE_F] = 0x4,
4477 [NEON_2RM_VRSQRTE_F] = 0x4,
4478 [NEON_2RM_VCVT_FS] = 0x4,
4479 [NEON_2RM_VCVT_FU] = 0x4,
4480 [NEON_2RM_VCVT_SF] = 0x4,
4481 [NEON_2RM_VCVT_UF] = 0x4,
4482};
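
/* For illustration: per the comment above this array, [NEON_2RM_VREV16] = 0x1
 * means VREV16 only accepts size == 0 (byte elements); any other size value
 * has its bit clear and therefore UNDEFs.
 */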
4483
9ee6e8bb
PB
4484/* Translate a NEON data processing instruction. Return nonzero if the
4485 instruction is invalid.
ad69471c
PB
4486 We process data in a mixture of 32-bit and 64-bit chunks.
4487 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4488
0ecb72a5 4489static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4490{
4491 int op;
4492 int q;
4493 int rd, rn, rm;
4494 int size;
4495 int shift;
4496 int pass;
4497 int count;
4498 int pairwise;
4499 int u;
ca9a32e4 4500 uint32_t imm, mask;
39d5492a 4501 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4502 TCGv_i64 tmp64;
9ee6e8bb 4503
5df8bac1 4504 if (!s->vfp_enabled)
9ee6e8bb
PB
4505 return 1;
4506 q = (insn & (1 << 6)) != 0;
4507 u = (insn >> 24) & 1;
4508 VFP_DREG_D(rd, insn);
4509 VFP_DREG_N(rn, insn);
4510 VFP_DREG_M(rm, insn);
4511 size = (insn >> 20) & 3;
4512 if ((insn & (1 << 23)) == 0) {
4513 /* Three register same length. */
4514 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4515 /* Catch invalid op and bad size combinations: UNDEF */
4516 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4517 return 1;
4518 }
25f84f79
PM
4519 /* All insns of this form UNDEF for either this condition or the
4520 * superset of cases "Q==1"; we catch the latter later.
4521 */
4522 if (q && ((rd | rn | rm) & 1)) {
4523 return 1;
4524 }
62698be3
PM
4525 if (size == 3 && op != NEON_3R_LOGIC) {
4526 /* 64-bit element instructions. */
9ee6e8bb 4527 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4528 neon_load_reg64(cpu_V0, rn + pass);
4529 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4530 switch (op) {
62698be3 4531 case NEON_3R_VQADD:
9ee6e8bb 4532 if (u) {
02da0b2d
PM
4533 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4534 cpu_V0, cpu_V1);
2c0262af 4535 } else {
02da0b2d
PM
4536 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4537 cpu_V0, cpu_V1);
2c0262af 4538 }
9ee6e8bb 4539 break;
62698be3 4540 case NEON_3R_VQSUB:
9ee6e8bb 4541 if (u) {
02da0b2d
PM
4542 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4543 cpu_V0, cpu_V1);
ad69471c 4544 } else {
02da0b2d
PM
4545 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4546 cpu_V0, cpu_V1);
ad69471c
PB
4547 }
4548 break;
62698be3 4549 case NEON_3R_VSHL:
ad69471c
PB
4550 if (u) {
4551 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4552 } else {
4553 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4554 }
4555 break;
62698be3 4556 case NEON_3R_VQSHL:
ad69471c 4557 if (u) {
02da0b2d
PM
4558 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4559 cpu_V1, cpu_V0);
ad69471c 4560 } else {
02da0b2d
PM
4561 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4562 cpu_V1, cpu_V0);
ad69471c
PB
4563 }
4564 break;
62698be3 4565 case NEON_3R_VRSHL:
ad69471c
PB
4566 if (u) {
4567 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4568 } else {
ad69471c
PB
4569 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4570 }
4571 break;
62698be3 4572 case NEON_3R_VQRSHL:
ad69471c 4573 if (u) {
02da0b2d
PM
4574 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4575 cpu_V1, cpu_V0);
ad69471c 4576 } else {
02da0b2d
PM
4577 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4578 cpu_V1, cpu_V0);
1e8d4eec 4579 }
9ee6e8bb 4580 break;
62698be3 4581 case NEON_3R_VADD_VSUB:
9ee6e8bb 4582 if (u) {
ad69471c 4583 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4584 } else {
ad69471c 4585 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4586 }
4587 break;
4588 default:
4589 abort();
2c0262af 4590 }
ad69471c 4591 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4592 }
9ee6e8bb 4593 return 0;
2c0262af 4594 }
25f84f79 4595 pairwise = 0;
9ee6e8bb 4596 switch (op) {
62698be3
PM
4597 case NEON_3R_VSHL:
4598 case NEON_3R_VQSHL:
4599 case NEON_3R_VRSHL:
4600 case NEON_3R_VQRSHL:
9ee6e8bb 4601 {
ad69471c
PB
4602 int rtmp;
4603 /* Shift instruction operands are reversed. */
4604 rtmp = rn;
9ee6e8bb 4605 rn = rm;
ad69471c 4606 rm = rtmp;
9ee6e8bb 4607 }
2c0262af 4608 break;
25f84f79
PM
4609 case NEON_3R_VPADD:
4610 if (u) {
4611 return 1;
4612 }
4613 /* Fall through */
62698be3
PM
4614 case NEON_3R_VPMAX:
4615 case NEON_3R_VPMIN:
9ee6e8bb 4616 pairwise = 1;
2c0262af 4617 break;
25f84f79
PM
4618 case NEON_3R_FLOAT_ARITH:
4619 pairwise = (u && size < 2); /* if VPADD (float) */
4620 break;
4621 case NEON_3R_FLOAT_MINMAX:
4622 pairwise = u; /* if VPMIN/VPMAX (float) */
4623 break;
4624 case NEON_3R_FLOAT_CMP:
4625 if (!u && size) {
4626 /* no encoding for U=0 C=1x */
4627 return 1;
4628 }
4629 break;
4630 case NEON_3R_FLOAT_ACMP:
4631 if (!u) {
4632 return 1;
4633 }
4634 break;
4635 case NEON_3R_VRECPS_VRSQRTS:
4636 if (u) {
4637 return 1;
4638 }
2c0262af 4639 break;
25f84f79
PM
4640 case NEON_3R_VMUL:
4641 if (u && (size != 0)) {
4642 /* UNDEF on invalid size for polynomial subcase */
4643 return 1;
4644 }
2c0262af 4645 break;
da97f52c
PM
4646 case NEON_3R_VFM:
4647 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4648 return 1;
4649 }
4650 break;
9ee6e8bb 4651 default:
2c0262af 4652 break;
9ee6e8bb 4653 }
dd8fbd78 4654
25f84f79
PM
4655 if (pairwise && q) {
4656 /* All the pairwise insns UNDEF if Q is set */
4657 return 1;
4658 }
4659
9ee6e8bb
PB
4660 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4661
4662 if (pairwise) {
4663 /* Pairwise. */
a5a14945
JR
4664 if (pass < 1) {
4665 tmp = neon_load_reg(rn, 0);
4666 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4667 } else {
a5a14945
JR
4668 tmp = neon_load_reg(rm, 0);
4669 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4670 }
4671 } else {
4672 /* Elementwise. */
dd8fbd78
FN
4673 tmp = neon_load_reg(rn, pass);
4674 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4675 }
4676 switch (op) {
62698be3 4677 case NEON_3R_VHADD:
9ee6e8bb
PB
4678 GEN_NEON_INTEGER_OP(hadd);
4679 break;
62698be3 4680 case NEON_3R_VQADD:
02da0b2d 4681 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4682 break;
62698be3 4683 case NEON_3R_VRHADD:
9ee6e8bb 4684 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4685 break;
62698be3 4686 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4687 switch ((u << 2) | size) {
4688 case 0: /* VAND */
dd8fbd78 4689 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4690 break;
4691 case 1: /* BIC */
f669df27 4692 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4693 break;
4694 case 2: /* VORR */
dd8fbd78 4695 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4696 break;
4697 case 3: /* VORN */
f669df27 4698 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4699 break;
4700 case 4: /* VEOR */
dd8fbd78 4701 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4702 break;
4703 case 5: /* VBSL */
dd8fbd78
FN
4704 tmp3 = neon_load_reg(rd, pass);
4705 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4706 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4707 break;
4708 case 6: /* VBIT */
dd8fbd78
FN
4709 tmp3 = neon_load_reg(rd, pass);
4710 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4711 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4712 break;
4713 case 7: /* VBIF */
dd8fbd78
FN
4714 tmp3 = neon_load_reg(rd, pass);
4715 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4716 tcg_temp_free_i32(tmp3);
9ee6e8bb 4717 break;
2c0262af
FB
4718 }
4719 break;
62698be3 4720 case NEON_3R_VHSUB:
9ee6e8bb
PB
4721 GEN_NEON_INTEGER_OP(hsub);
4722 break;
62698be3 4723 case NEON_3R_VQSUB:
02da0b2d 4724 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4725 break;
62698be3 4726 case NEON_3R_VCGT:
9ee6e8bb
PB
4727 GEN_NEON_INTEGER_OP(cgt);
4728 break;
62698be3 4729 case NEON_3R_VCGE:
9ee6e8bb
PB
4730 GEN_NEON_INTEGER_OP(cge);
4731 break;
62698be3 4732 case NEON_3R_VSHL:
ad69471c 4733 GEN_NEON_INTEGER_OP(shl);
2c0262af 4734 break;
62698be3 4735 case NEON_3R_VQSHL:
02da0b2d 4736 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4737 break;
62698be3 4738 case NEON_3R_VRSHL:
ad69471c 4739 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4740 break;
62698be3 4741 case NEON_3R_VQRSHL:
02da0b2d 4742 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4743 break;
62698be3 4744 case NEON_3R_VMAX:
9ee6e8bb
PB
4745 GEN_NEON_INTEGER_OP(max);
4746 break;
62698be3 4747 case NEON_3R_VMIN:
9ee6e8bb
PB
4748 GEN_NEON_INTEGER_OP(min);
4749 break;
62698be3 4750 case NEON_3R_VABD:
9ee6e8bb
PB
4751 GEN_NEON_INTEGER_OP(abd);
4752 break;
62698be3 4753 case NEON_3R_VABA:
9ee6e8bb 4754 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4755 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4756 tmp2 = neon_load_reg(rd, pass);
4757 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4758 break;
62698be3 4759 case NEON_3R_VADD_VSUB:
9ee6e8bb 4760 if (!u) { /* VADD */
62698be3 4761 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4762 } else { /* VSUB */
4763 switch (size) {
dd8fbd78
FN
4764 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4765 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4766 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4767 default: abort();
9ee6e8bb
PB
4768 }
4769 }
4770 break;
62698be3 4771 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4772 if (!u) { /* VTST */
4773 switch (size) {
dd8fbd78
FN
4774 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4775 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4776 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4777 default: abort();
9ee6e8bb
PB
4778 }
4779 } else { /* VCEQ */
4780 switch (size) {
dd8fbd78
FN
4781 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4782 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4783 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4784 default: abort();
9ee6e8bb
PB
4785 }
4786 }
4787 break;
62698be3 4788 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4789 switch (size) {
dd8fbd78
FN
4790 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4791 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4792 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4793 default: abort();
9ee6e8bb 4794 }
7d1b0095 4795 tcg_temp_free_i32(tmp2);
dd8fbd78 4796 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4797 if (u) { /* VMLS */
dd8fbd78 4798 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4799 } else { /* VMLA */
dd8fbd78 4800 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4801 }
4802 break;
62698be3 4803 case NEON_3R_VMUL:
9ee6e8bb 4804 if (u) { /* polynomial */
dd8fbd78 4805 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4806 } else { /* Integer */
4807 switch (size) {
dd8fbd78
FN
4808 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4809 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4810 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4811 default: abort();
9ee6e8bb
PB
4812 }
4813 }
4814 break;
62698be3 4815 case NEON_3R_VPMAX:
9ee6e8bb
PB
4816 GEN_NEON_INTEGER_OP(pmax);
4817 break;
62698be3 4818 case NEON_3R_VPMIN:
9ee6e8bb
PB
4819 GEN_NEON_INTEGER_OP(pmin);
4820 break;
62698be3 4821 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4822 if (!u) { /* VQDMULH */
4823 switch (size) {
02da0b2d
PM
4824 case 1:
4825 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4826 break;
4827 case 2:
4828 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4829 break;
62698be3 4830 default: abort();
9ee6e8bb 4831 }
62698be3 4832 } else { /* VQRDMULH */
9ee6e8bb 4833 switch (size) {
02da0b2d
PM
4834 case 1:
4835 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4836 break;
4837 case 2:
4838 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4839 break;
62698be3 4840 default: abort();
9ee6e8bb
PB
4841 }
4842 }
4843 break;
62698be3 4844 case NEON_3R_VPADD:
9ee6e8bb 4845 switch (size) {
dd8fbd78
FN
4846 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4847 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4848 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4849 default: abort();
9ee6e8bb
PB
4850 }
4851 break;
62698be3 4852 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4853 {
4854 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4855 switch ((u << 2) | size) {
4856 case 0: /* VADD */
aa47cfdd
PM
4857 case 4: /* VPADD */
4858 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4859 break;
4860 case 2: /* VSUB */
aa47cfdd 4861 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4862 break;
4863 case 6: /* VABD */
aa47cfdd 4864 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4865 break;
4866 default:
62698be3 4867 abort();
9ee6e8bb 4868 }
aa47cfdd 4869 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4870 break;
aa47cfdd 4871 }
62698be3 4872 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4873 {
4874 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4875 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4876 if (!u) {
7d1b0095 4877 tcg_temp_free_i32(tmp2);
dd8fbd78 4878 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4879 if (size == 0) {
aa47cfdd 4880 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4881 } else {
aa47cfdd 4882 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4883 }
4884 }
aa47cfdd 4885 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4886 break;
aa47cfdd 4887 }
62698be3 4888 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4889 {
4890 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4891 if (!u) {
aa47cfdd 4892 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4893 } else {
aa47cfdd
PM
4894 if (size == 0) {
4895 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4896 } else {
4897 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4898 }
b5ff1b31 4899 }
aa47cfdd 4900 tcg_temp_free_ptr(fpstatus);
2c0262af 4901 break;
aa47cfdd 4902 }
62698be3 4903 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4904 {
4905 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4906 if (size == 0) {
4907 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4908 } else {
4909 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4910 }
4911 tcg_temp_free_ptr(fpstatus);
2c0262af 4912 break;
aa47cfdd 4913 }
62698be3 4914 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4915 {
4916 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4917 if (size == 0) {
4918 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4919 } else {
4920 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4921 }
4922 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4923 break;
aa47cfdd 4924 }
62698be3 4925 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4926 if (size == 0)
dd8fbd78 4927 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4928 else
dd8fbd78 4929 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4930 break;
da97f52c
PM
4931 case NEON_3R_VFM:
4932 {
4933 /* VFMA, VFMS: fused multiply-add */
4934 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4935 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4936 if (size) {
4937 /* VFMS */
4938 gen_helper_vfp_negs(tmp, tmp);
4939 }
4940 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4941 tcg_temp_free_i32(tmp3);
4942 tcg_temp_free_ptr(fpstatus);
4943 break;
4944 }
9ee6e8bb
PB
4945 default:
4946 abort();
2c0262af 4947 }
7d1b0095 4948 tcg_temp_free_i32(tmp2);
dd8fbd78 4949
9ee6e8bb
PB
4950 /* Save the result. For elementwise operations we can put it
4951 straight into the destination register. For pairwise operations
4952 we have to be careful to avoid clobbering the source operands. */
4953 if (pairwise && rd == rm) {
dd8fbd78 4954 neon_store_scratch(pass, tmp);
9ee6e8bb 4955 } else {
dd8fbd78 4956 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4957 }
4958
4959 } /* for pass */
4960 if (pairwise && rd == rm) {
4961 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4962 tmp = neon_load_scratch(pass);
4963 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4964 }
4965 }
ad69471c 4966 /* End of 3 register same size operations. */
9ee6e8bb
PB
4967 } else if (insn & (1 << 4)) {
4968 if ((insn & 0x00380080) != 0) {
4969 /* Two registers and shift. */
4970 op = (insn >> 8) & 0xf;
4971 if (insn & (1 << 7)) {
cc13115b
PM
4972 /* 64-bit shift. */
4973 if (op > 7) {
4974 return 1;
4975 }
9ee6e8bb
PB
4976 size = 3;
4977 } else {
4978 size = 2;
4979 while ((insn & (1 << (size + 19))) == 0)
4980 size--;
4981 }
4982 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4983 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4984 by immediate using the variable shift operations. */
4985 if (op < 8) {
4986 /* Shift by immediate:
4987 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
4988 if (q && ((rd | rm) & 1)) {
4989 return 1;
4990 }
4991 if (!u && (op == 4 || op == 6)) {
4992 return 1;
4993 }
9ee6e8bb
PB
4994 /* Right shifts are encoded as N - shift, where N is the
4995 element size in bits. */
4996 if (op <= 4)
4997 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4998 if (size == 3) {
4999 count = q + 1;
5000 } else {
5001 count = q ? 4: 2;
5002 }
5003 switch (size) {
5004 case 0:
5005 imm = (uint8_t) shift;
5006 imm |= imm << 8;
5007 imm |= imm << 16;
5008 break;
5009 case 1:
5010 imm = (uint16_t) shift;
5011 imm |= imm << 16;
5012 break;
5013 case 2:
5014 case 3:
5015 imm = shift;
5016 break;
5017 default:
5018 abort();
5019 }
5020
5021 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5022 if (size == 3) {
5023 neon_load_reg64(cpu_V0, rm + pass);
5024 tcg_gen_movi_i64(cpu_V1, imm);
5025 switch (op) {
5026 case 0: /* VSHR */
5027 case 1: /* VSRA */
5028 if (u)
5029 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5030 else
ad69471c 5031 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5032 break;
ad69471c
PB
5033 case 2: /* VRSHR */
5034 case 3: /* VRSRA */
5035 if (u)
5036 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5037 else
ad69471c 5038 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5039 break;
ad69471c 5040 case 4: /* VSRI */
ad69471c
PB
5041 case 5: /* VSHL, VSLI */
5042 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5043 break;
0322b26e 5044 case 6: /* VQSHLU */
02da0b2d
PM
5045 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5046 cpu_V0, cpu_V1);
ad69471c 5047 break;
0322b26e
PM
5048 case 7: /* VQSHL */
5049 if (u) {
02da0b2d 5050 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5051 cpu_V0, cpu_V1);
5052 } else {
02da0b2d 5053 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5054 cpu_V0, cpu_V1);
5055 }
9ee6e8bb 5056 break;
9ee6e8bb 5057 }
ad69471c
PB
5058 if (op == 1 || op == 3) {
5059 /* Accumulate. */
5371cb81 5060 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5061 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5062 } else if (op == 4 || (op == 5 && u)) {
5063 /* Insert */
923e6509
CL
5064 neon_load_reg64(cpu_V1, rd + pass);
5065 uint64_t mask;
5066 if (shift < -63 || shift > 63) {
5067 mask = 0;
5068 } else {
5069 if (op == 4) {
5070 mask = 0xffffffffffffffffull >> -shift;
5071 } else {
5072 mask = 0xffffffffffffffffull << shift;
5073 }
5074 }
5075 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5076 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5077 }
5078 neon_store_reg64(cpu_V0, rd + pass);
5079 } else { /* size < 3 */
5080 /* Operands in T0 and T1. */
dd8fbd78 5081 tmp = neon_load_reg(rm, pass);
7d1b0095 5082 tmp2 = tcg_temp_new_i32();
dd8fbd78 5083 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5084 switch (op) {
5085 case 0: /* VSHR */
5086 case 1: /* VSRA */
5087 GEN_NEON_INTEGER_OP(shl);
5088 break;
5089 case 2: /* VRSHR */
5090 case 3: /* VRSRA */
5091 GEN_NEON_INTEGER_OP(rshl);
5092 break;
5093 case 4: /* VSRI */
ad69471c
PB
5094 case 5: /* VSHL, VSLI */
5095 switch (size) {
dd8fbd78
FN
5096 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5097 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5098 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5099 default: abort();
ad69471c
PB
5100 }
5101 break;
0322b26e 5102 case 6: /* VQSHLU */
ad69471c 5103 switch (size) {
0322b26e 5104 case 0:
02da0b2d
PM
5105 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5106 tmp, tmp2);
0322b26e
PM
5107 break;
5108 case 1:
02da0b2d
PM
5109 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5110 tmp, tmp2);
0322b26e
PM
5111 break;
5112 case 2:
02da0b2d
PM
5113 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5114 tmp, tmp2);
0322b26e
PM
5115 break;
5116 default:
cc13115b 5117 abort();
ad69471c
PB
5118 }
5119 break;
0322b26e 5120 case 7: /* VQSHL */
02da0b2d 5121 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5122 break;
ad69471c 5123 }
7d1b0095 5124 tcg_temp_free_i32(tmp2);
ad69471c
PB
5125
5126 if (op == 1 || op == 3) {
5127 /* Accumulate. */
dd8fbd78 5128 tmp2 = neon_load_reg(rd, pass);
5371cb81 5129 gen_neon_add(size, tmp, tmp2);
7d1b0095 5130 tcg_temp_free_i32(tmp2);
ad69471c
PB
5131 } else if (op == 4 || (op == 5 && u)) {
5132 /* Insert */
5133 switch (size) {
5134 case 0:
5135 if (op == 4)
ca9a32e4 5136 mask = 0xff >> -shift;
ad69471c 5137 else
ca9a32e4
JR
5138 mask = (uint8_t)(0xff << shift);
5139 mask |= mask << 8;
5140 mask |= mask << 16;
ad69471c
PB
5141 break;
5142 case 1:
5143 if (op == 4)
ca9a32e4 5144 mask = 0xffff >> -shift;
ad69471c 5145 else
ca9a32e4
JR
5146 mask = (uint16_t)(0xffff << shift);
5147 mask |= mask << 16;
ad69471c
PB
5148 break;
5149 case 2:
ca9a32e4
JR
5150 if (shift < -31 || shift > 31) {
5151 mask = 0;
5152 } else {
5153 if (op == 4)
5154 mask = 0xffffffffu >> -shift;
5155 else
5156 mask = 0xffffffffu << shift;
5157 }
ad69471c
PB
5158 break;
5159 default:
5160 abort();
5161 }
dd8fbd78 5162 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5163 tcg_gen_andi_i32(tmp, tmp, mask);
5164 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5165 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5166 tcg_temp_free_i32(tmp2);
ad69471c 5167 }
dd8fbd78 5168 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5169 }
5170 } /* for pass */
5171 } else if (op < 10) {
ad69471c 5172 /* Shift by immediate and narrow:
9ee6e8bb 5173 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5174 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5175 if (rm & 1) {
5176 return 1;
5177 }
9ee6e8bb
PB
5178 shift = shift - (1 << (size + 3));
5179 size++;
92cdfaeb 5180 if (size == 3) {
a7812ae4 5181 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5182 neon_load_reg64(cpu_V0, rm);
5183 neon_load_reg64(cpu_V1, rm + 1);
5184 for (pass = 0; pass < 2; pass++) {
5185 TCGv_i64 in;
5186 if (pass == 0) {
5187 in = cpu_V0;
5188 } else {
5189 in = cpu_V1;
5190 }
ad69471c 5191 if (q) {
0b36f4cd 5192 if (input_unsigned) {
92cdfaeb 5193 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5194 } else {
92cdfaeb 5195 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5196 }
ad69471c 5197 } else {
0b36f4cd 5198 if (input_unsigned) {
92cdfaeb 5199 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5200 } else {
92cdfaeb 5201 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5202 }
ad69471c 5203 }
7d1b0095 5204 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5205 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5206 neon_store_reg(rd, pass, tmp);
5207 } /* for pass */
5208 tcg_temp_free_i64(tmp64);
5209 } else {
5210 if (size == 1) {
5211 imm = (uint16_t)shift;
5212 imm |= imm << 16;
2c0262af 5213 } else {
92cdfaeb
PM
5214 /* size == 2 */
5215 imm = (uint32_t)shift;
5216 }
5217 tmp2 = tcg_const_i32(imm);
5218 tmp4 = neon_load_reg(rm + 1, 0);
5219 tmp5 = neon_load_reg(rm + 1, 1);
5220 for (pass = 0; pass < 2; pass++) {
5221 if (pass == 0) {
5222 tmp = neon_load_reg(rm, 0);
5223 } else {
5224 tmp = tmp4;
5225 }
0b36f4cd
CL
5226 gen_neon_shift_narrow(size, tmp, tmp2, q,
5227 input_unsigned);
92cdfaeb
PM
5228 if (pass == 0) {
5229 tmp3 = neon_load_reg(rm, 1);
5230 } else {
5231 tmp3 = tmp5;
5232 }
0b36f4cd
CL
5233 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5234 input_unsigned);
36aa55dc 5235 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5236 tcg_temp_free_i32(tmp);
5237 tcg_temp_free_i32(tmp3);
5238 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5239 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5240 neon_store_reg(rd, pass, tmp);
5241 } /* for pass */
c6067f04 5242 tcg_temp_free_i32(tmp2);
b75263d6 5243 }
9ee6e8bb 5244 } else if (op == 10) {
cc13115b
PM
5245 /* VSHLL, VMOVL */
5246 if (q || (rd & 1)) {
9ee6e8bb 5247 return 1;
cc13115b 5248 }
ad69471c
PB
5249 tmp = neon_load_reg(rm, 0);
5250 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5251 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5252 if (pass == 1)
5253 tmp = tmp2;
5254
5255 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5256
9ee6e8bb
PB
5257 if (shift != 0) {
5258 /* The shift is less than the width of the source
ad69471c
PB
5259 type, so we can just shift the whole register. */
5260 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
                        /* Widen the result of the shift: we need to clear
                         * the potential overflow bits resulting from
                         * left bits of the narrow input appearing as
                         * right bits of the left neighbour narrow
                         * input.  */
ad69471c
PB
5266 if (size < 2 || !u) {
5267 uint64_t imm64;
5268 if (size == 0) {
5269 imm = (0xffu >> (8 - shift));
5270 imm |= imm << 16;
acdf01ef 5271 } else if (size == 1) {
ad69471c 5272 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5273 } else {
5274 /* size == 2 */
5275 imm = 0xffffffff >> (32 - shift);
5276 }
5277 if (size < 2) {
5278 imm64 = imm | (((uint64_t)imm) << 32);
5279 } else {
5280 imm64 = imm;
9ee6e8bb 5281 }
acdf01ef 5282 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5283 }
5284 }
ad69471c 5285 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5286 }
f73534a5 5287 } else if (op >= 14) {
9ee6e8bb 5288 /* VCVT fixed-point. */
cc13115b
PM
5289 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5290 return 1;
5291 }
f73534a5
PM
5292 /* We have already masked out the must-be-1 top bit of imm6,
5293 * hence this 32-shift where the ARM ARM has 64-imm6.
5294 */
5295 shift = 32 - shift;
9ee6e8bb 5296 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5297 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5298 if (!(op & 1)) {
9ee6e8bb 5299 if (u)
5500b06c 5300 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5301 else
5500b06c 5302 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5303 } else {
5304 if (u)
5500b06c 5305 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5306 else
5500b06c 5307 gen_vfp_tosl(0, shift, 1);
2c0262af 5308 }
4373f3ce 5309 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5310 }
5311 } else {
9ee6e8bb
PB
5312 return 1;
5313 }
5314 } else { /* (insn & 0x00380080) == 0 */
5315 int invert;
7d80fee5
PM
5316 if (q && (rd & 1)) {
5317 return 1;
5318 }
9ee6e8bb
PB
5319
5320 op = (insn >> 8) & 0xf;
5321 /* One register and immediate. */
5322 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5323 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5324 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5325 * We choose to not special-case this and will behave as if a
5326 * valid constant encoding of 0 had been given.
5327 */
9ee6e8bb
PB
5328 switch (op) {
5329 case 0: case 1:
5330 /* no-op */
5331 break;
5332 case 2: case 3:
5333 imm <<= 8;
5334 break;
5335 case 4: case 5:
5336 imm <<= 16;
5337 break;
5338 case 6: case 7:
5339 imm <<= 24;
5340 break;
5341 case 8: case 9:
5342 imm |= imm << 16;
5343 break;
5344 case 10: case 11:
5345 imm = (imm << 8) | (imm << 24);
5346 break;
5347 case 12:
8e31209e 5348 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5349 break;
5350 case 13:
5351 imm = (imm << 16) | 0xffff;
5352 break;
5353 case 14:
5354 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5355 if (invert)
5356 imm = ~imm;
5357 break;
5358 case 15:
7d80fee5
PM
5359 if (invert) {
5360 return 1;
5361 }
9ee6e8bb
PB
5362 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5363 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5364 break;
5365 }
5366 if (invert)
5367 imm = ~imm;
5368
9ee6e8bb
PB
5369 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5370 if (op & 1 && op < 12) {
ad69471c 5371 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5372 if (invert) {
5373 /* The immediate value has already been inverted, so
5374 BIC becomes AND. */
ad69471c 5375 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5376 } else {
ad69471c 5377 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5378 }
9ee6e8bb 5379 } else {
ad69471c 5380 /* VMOV, VMVN. */
7d1b0095 5381 tmp = tcg_temp_new_i32();
9ee6e8bb 5382 if (op == 14 && invert) {
a5a14945 5383 int n;
ad69471c
PB
5384 uint32_t val;
5385 val = 0;
9ee6e8bb
PB
5386 for (n = 0; n < 4; n++) {
5387 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5388 val |= 0xff << (n * 8);
9ee6e8bb 5389 }
ad69471c
PB
5390 tcg_gen_movi_i32(tmp, val);
5391 } else {
5392 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5393 }
9ee6e8bb 5394 }
ad69471c 5395 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5396 }
5397 }
e4b3861d 5398 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5399 if (size != 3) {
5400 op = (insn >> 8) & 0xf;
5401 if ((insn & (1 << 6)) == 0) {
5402 /* Three registers of different lengths. */
5403 int src1_wide;
5404 int src2_wide;
5405 int prewiden;
695272dc
PM
5406 /* undefreq: bit 0 : UNDEF if size != 0
5407 * bit 1 : UNDEF if size == 0
5408 * bit 2 : UNDEF if U == 1
5409 * Note that [1:0] set implies 'always UNDEF'
5410 */
5411 int undefreq;
5412 /* prewiden, src1_wide, src2_wide, undefreq */
5413 static const int neon_3reg_wide[16][4] = {
5414 {1, 0, 0, 0}, /* VADDL */
5415 {1, 1, 0, 0}, /* VADDW */
5416 {1, 0, 0, 0}, /* VSUBL */
5417 {1, 1, 0, 0}, /* VSUBW */
5418 {0, 1, 1, 0}, /* VADDHN */
5419 {0, 0, 0, 0}, /* VABAL */
5420 {0, 1, 1, 0}, /* VSUBHN */
5421 {0, 0, 0, 0}, /* VABDL */
5422 {0, 0, 0, 0}, /* VMLAL */
5423 {0, 0, 0, 6}, /* VQDMLAL */
5424 {0, 0, 0, 0}, /* VMLSL */
5425 {0, 0, 0, 6}, /* VQDMLSL */
5426 {0, 0, 0, 0}, /* Integer VMULL */
5427 {0, 0, 0, 2}, /* VQDMULL */
5428 {0, 0, 0, 5}, /* Polynomial VMULL */
5429 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5430 };
5431
5432 prewiden = neon_3reg_wide[op][0];
5433 src1_wide = neon_3reg_wide[op][1];
5434 src2_wide = neon_3reg_wide[op][2];
695272dc 5435 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5436
695272dc
PM
5437 if (((undefreq & 1) && (size != 0)) ||
5438 ((undefreq & 2) && (size == 0)) ||
5439 ((undefreq & 4) && u)) {
5440 return 1;
5441 }
5442 if ((src1_wide && (rn & 1)) ||
5443 (src2_wide && (rm & 1)) ||
5444 (!src2_wide && (rd & 1))) {
ad69471c 5445 return 1;
695272dc 5446 }
ad69471c 5447
9ee6e8bb
PB
5448 /* Avoid overlapping operands. Wide source operands are
5449 always aligned so will never overlap with wide
5450 destinations in problematic ways. */
8f8e3aa4 5451 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5452 tmp = neon_load_reg(rm, 1);
5453 neon_store_scratch(2, tmp);
8f8e3aa4 5454 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5455 tmp = neon_load_reg(rn, 1);
5456 neon_store_scratch(2, tmp);
9ee6e8bb 5457 }
39d5492a 5458 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5459 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5460 if (src1_wide) {
5461 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5462 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5463 } else {
ad69471c 5464 if (pass == 1 && rd == rn) {
dd8fbd78 5465 tmp = neon_load_scratch(2);
9ee6e8bb 5466 } else {
ad69471c
PB
5467 tmp = neon_load_reg(rn, pass);
5468 }
5469 if (prewiden) {
5470 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5471 }
5472 }
ad69471c
PB
5473 if (src2_wide) {
5474 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5475 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5476 } else {
ad69471c 5477 if (pass == 1 && rd == rm) {
dd8fbd78 5478 tmp2 = neon_load_scratch(2);
9ee6e8bb 5479 } else {
ad69471c
PB
5480 tmp2 = neon_load_reg(rm, pass);
5481 }
5482 if (prewiden) {
5483 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5484 }
9ee6e8bb
PB
5485 }
5486 switch (op) {
5487 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5488 gen_neon_addl(size);
9ee6e8bb 5489 break;
79b0e534 5490 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5491 gen_neon_subl(size);
9ee6e8bb
PB
5492 break;
5493 case 5: case 7: /* VABAL, VABDL */
5494 switch ((size << 1) | u) {
ad69471c
PB
5495 case 0:
5496 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5497 break;
5498 case 1:
5499 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5500 break;
5501 case 2:
5502 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5503 break;
5504 case 3:
5505 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5506 break;
5507 case 4:
5508 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5509 break;
5510 case 5:
5511 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5512 break;
9ee6e8bb
PB
5513 default: abort();
5514 }
7d1b0095
PM
5515 tcg_temp_free_i32(tmp2);
5516 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5517 break;
5518 case 8: case 9: case 10: case 11: case 12: case 13:
5519 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5520 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5521 break;
5522 case 14: /* Polynomial VMULL */
e5ca24cb 5523 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5524 tcg_temp_free_i32(tmp2);
5525 tcg_temp_free_i32(tmp);
e5ca24cb 5526 break;
695272dc
PM
5527 default: /* 15 is RESERVED: caught earlier */
5528 abort();
9ee6e8bb 5529 }
ebcd88ce
PM
5530 if (op == 13) {
5531 /* VQDMULL */
5532 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5533 neon_store_reg64(cpu_V0, rd + pass);
5534 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5535 /* Accumulate. */
ebcd88ce 5536 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5537 switch (op) {
4dc064e6
PM
5538 case 10: /* VMLSL */
5539 gen_neon_negl(cpu_V0, size);
5540 /* Fall through */
5541 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5542 gen_neon_addl(size);
9ee6e8bb
PB
5543 break;
5544 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5545 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5546 if (op == 11) {
5547 gen_neon_negl(cpu_V0, size);
5548 }
ad69471c
PB
5549 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5550 break;
9ee6e8bb
PB
5551 default:
5552 abort();
5553 }
ad69471c 5554 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5555 } else if (op == 4 || op == 6) {
5556 /* Narrowing operation. */
7d1b0095 5557 tmp = tcg_temp_new_i32();
79b0e534 5558 if (!u) {
9ee6e8bb 5559 switch (size) {
ad69471c
PB
5560 case 0:
5561 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5562 break;
5563 case 1:
5564 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5565 break;
5566 case 2:
5567 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5568 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5569 break;
9ee6e8bb
PB
5570 default: abort();
5571 }
5572 } else {
5573 switch (size) {
ad69471c
PB
5574 case 0:
5575 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5576 break;
5577 case 1:
5578 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5579 break;
5580 case 2:
5581 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5582 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5583 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5584 break;
9ee6e8bb
PB
5585 default: abort();
5586 }
5587 }
ad69471c
PB
5588 if (pass == 0) {
5589 tmp3 = tmp;
5590 } else {
5591 neon_store_reg(rd, 0, tmp3);
5592 neon_store_reg(rd, 1, tmp);
5593 }
9ee6e8bb
PB
5594 } else {
5595 /* Write back the result. */
ad69471c 5596 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5597 }
5598 }
5599 } else {
3e3326df
PM
5600 /* Two registers and a scalar. NB that for ops of this form
5601 * the ARM ARM labels bit 24 as Q, but it is in our variable
5602 * 'u', not 'q'.
5603 */
5604 if (size == 0) {
5605 return 1;
5606 }
9ee6e8bb 5607 switch (op) {
9ee6e8bb 5608 case 1: /* Floating point VMLA scalar */
9ee6e8bb 5609 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5610 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5611 if (size == 1) {
5612 return 1;
5613 }
5614 /* fall through */
5615 case 0: /* Integer VMLA scalar */
5616 case 4: /* Integer VMLS scalar */
5617 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5618 case 12: /* VQDMULH scalar */
5619 case 13: /* VQRDMULH scalar */
3e3326df
PM
5620 if (u && ((rd | rn) & 1)) {
5621 return 1;
5622 }
dd8fbd78
FN
5623 tmp = neon_get_scalar(size, rm);
5624 neon_store_scratch(0, tmp);
9ee6e8bb 5625 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5626 tmp = neon_load_scratch(0);
5627 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5628 if (op == 12) {
5629 if (size == 1) {
02da0b2d 5630 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5631 } else {
02da0b2d 5632 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5633 }
5634 } else if (op == 13) {
5635 if (size == 1) {
02da0b2d 5636 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5637 } else {
02da0b2d 5638 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5639 }
5640 } else if (op & 1) {
aa47cfdd
PM
5641 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5642 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5643 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5644 } else {
5645 switch (size) {
dd8fbd78
FN
5646 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5647 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5648 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5649 default: abort();
9ee6e8bb
PB
5650 }
5651 }
7d1b0095 5652 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5653 if (op < 8) {
5654 /* Accumulate. */
dd8fbd78 5655 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5656 switch (op) {
5657 case 0:
dd8fbd78 5658 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5659 break;
5660 case 1:
aa47cfdd
PM
5661 {
5662 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5663 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5664 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5665 break;
aa47cfdd 5666 }
9ee6e8bb 5667 case 4:
dd8fbd78 5668 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5669 break;
5670 case 5:
aa47cfdd
PM
5671 {
5672 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5673 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5674 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5675 break;
aa47cfdd 5676 }
9ee6e8bb
PB
5677 default:
5678 abort();
5679 }
7d1b0095 5680 tcg_temp_free_i32(tmp2);
9ee6e8bb 5681 }
dd8fbd78 5682 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5683 }
5684 break;
9ee6e8bb 5685 case 3: /* VQDMLAL scalar */
9ee6e8bb 5686 case 7: /* VQDMLSL scalar */
9ee6e8bb 5687 case 11: /* VQDMULL scalar */
3e3326df 5688 if (u == 1) {
ad69471c 5689 return 1;
3e3326df
PM
5690 }
5691 /* fall through */
 5692 case 2: /* VMLAL scalar */
5693 case 6: /* VMLSL scalar */
5694 case 10: /* VMULL scalar */
5695 if (rd & 1) {
5696 return 1;
5697 }
dd8fbd78 5698 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5699 /* We need a copy of tmp2 because gen_neon_mull
5700 * deletes it during pass 0. */
7d1b0095 5701 tmp4 = tcg_temp_new_i32();
c6067f04 5702 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5703 tmp3 = neon_load_reg(rn, 1);
ad69471c 5704
9ee6e8bb 5705 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5706 if (pass == 0) {
5707 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5708 } else {
dd8fbd78 5709 tmp = tmp3;
c6067f04 5710 tmp2 = tmp4;
9ee6e8bb 5711 }
ad69471c 5712 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5713 if (op != 11) {
5714 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5715 }
9ee6e8bb 5716 switch (op) {
4dc064e6
PM
5717 case 6:
5718 gen_neon_negl(cpu_V0, size);
5719 /* Fall through */
5720 case 2:
ad69471c 5721 gen_neon_addl(size);
9ee6e8bb
PB
5722 break;
5723 case 3: case 7:
ad69471c 5724 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5725 if (op == 7) {
5726 gen_neon_negl(cpu_V0, size);
5727 }
ad69471c 5728 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5729 break;
5730 case 10:
5731 /* no-op */
5732 break;
5733 case 11:
ad69471c 5734 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5735 break;
5736 default:
5737 abort();
5738 }
ad69471c 5739 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5740 }
dd8fbd78 5741
dd8fbd78 5742
9ee6e8bb
PB
5743 break;
5744 default: /* 14 and 15 are RESERVED */
5745 return 1;
5746 }
5747 }
5748 } else { /* size == 3 */
5749 if (!u) {
5750 /* Extract. */
9ee6e8bb 5751 imm = (insn >> 8) & 0xf;
ad69471c
PB
5752
5753 if (imm > 7 && !q)
5754 return 1;
5755
52579ea1
PM
5756 if (q && ((rd | rn | rm) & 1)) {
5757 return 1;
5758 }
5759
ad69471c
PB
5760 if (imm == 0) {
5761 neon_load_reg64(cpu_V0, rn);
5762 if (q) {
5763 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5764 }
ad69471c
PB
5765 } else if (imm == 8) {
5766 neon_load_reg64(cpu_V0, rn + 1);
5767 if (q) {
5768 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5769 }
ad69471c 5770 } else if (q) {
a7812ae4 5771 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5772 if (imm < 8) {
5773 neon_load_reg64(cpu_V0, rn);
a7812ae4 5774 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5775 } else {
5776 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5777 neon_load_reg64(tmp64, rm);
ad69471c
PB
5778 }
5779 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5780 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5781 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5782 if (imm < 8) {
5783 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5784 } else {
ad69471c
PB
5785 neon_load_reg64(cpu_V1, rm + 1);
5786 imm -= 8;
9ee6e8bb 5787 }
ad69471c 5788 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5789 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5790 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5791 tcg_temp_free_i64(tmp64);
ad69471c 5792 } else {
a7812ae4 5793 /* BUGFIX */
ad69471c 5794 neon_load_reg64(cpu_V0, rn);
a7812ae4 5795 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5796 neon_load_reg64(cpu_V1, rm);
a7812ae4 5797 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5798 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5799 }
5800 neon_store_reg64(cpu_V0, rd);
5801 if (q) {
5802 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5803 }
5804 } else if ((insn & (1 << 11)) == 0) {
5805 /* Two register misc. */
5806 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5807 size = (insn >> 18) & 3;
600b828c
PM
5808 /* UNDEF for unknown op values and bad op-size combinations */
5809 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5810 return 1;
5811 }
fc2a9b37
PM
5812 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5813 q && ((rm | rd) & 1)) {
5814 return 1;
5815 }
9ee6e8bb 5816 switch (op) {
600b828c 5817 case NEON_2RM_VREV64:
9ee6e8bb 5818 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5819 tmp = neon_load_reg(rm, pass * 2);
5820 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5821 switch (size) {
dd8fbd78
FN
5822 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5823 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5824 case 2: /* no-op */ break;
5825 default: abort();
5826 }
dd8fbd78 5827 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5828 if (size == 2) {
dd8fbd78 5829 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5830 } else {
9ee6e8bb 5831 switch (size) {
dd8fbd78
FN
5832 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5833 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5834 default: abort();
5835 }
dd8fbd78 5836 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5837 }
5838 }
5839 break;
600b828c
PM
5840 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5841 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5842 for (pass = 0; pass < q + 1; pass++) {
5843 tmp = neon_load_reg(rm, pass * 2);
5844 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5845 tmp = neon_load_reg(rm, pass * 2 + 1);
5846 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5847 switch (size) {
5848 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5849 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5850 case 2: tcg_gen_add_i64(CPU_V001); break;
5851 default: abort();
5852 }
600b828c 5853 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5854 /* Accumulate. */
ad69471c
PB
5855 neon_load_reg64(cpu_V1, rd + pass);
5856 gen_neon_addl(size);
9ee6e8bb 5857 }
ad69471c 5858 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5859 }
5860 break;
600b828c 5861 case NEON_2RM_VTRN:
9ee6e8bb 5862 if (size == 2) {
a5a14945 5863 int n;
9ee6e8bb 5864 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5865 tmp = neon_load_reg(rm, n);
5866 tmp2 = neon_load_reg(rd, n + 1);
5867 neon_store_reg(rm, n, tmp2);
5868 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5869 }
5870 } else {
5871 goto elementwise;
5872 }
5873 break;
600b828c 5874 case NEON_2RM_VUZP:
02acedf9 5875 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5876 return 1;
9ee6e8bb
PB
5877 }
5878 break;
600b828c 5879 case NEON_2RM_VZIP:
d68a6f3a 5880 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5881 return 1;
9ee6e8bb
PB
5882 }
5883 break;
600b828c
PM
5884 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5885 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5886 if (rm & 1) {
5887 return 1;
5888 }
39d5492a 5889 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5890 for (pass = 0; pass < 2; pass++) {
ad69471c 5891 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5892 tmp = tcg_temp_new_i32();
600b828c
PM
5893 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5894 tmp, cpu_V0);
ad69471c
PB
5895 if (pass == 0) {
5896 tmp2 = tmp;
5897 } else {
5898 neon_store_reg(rd, 0, tmp2);
5899 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5900 }
9ee6e8bb
PB
5901 }
5902 break;
600b828c 5903 case NEON_2RM_VSHLL:
fc2a9b37 5904 if (q || (rd & 1)) {
9ee6e8bb 5905 return 1;
600b828c 5906 }
ad69471c
PB
5907 tmp = neon_load_reg(rm, 0);
5908 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5909 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5910 if (pass == 1)
5911 tmp = tmp2;
5912 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5913 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5914 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5915 }
5916 break;
600b828c 5917 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5918 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5919 q || (rm & 1)) {
5920 return 1;
5921 }
7d1b0095
PM
5922 tmp = tcg_temp_new_i32();
5923 tmp2 = tcg_temp_new_i32();
60011498 5924 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5925 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5926 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5927 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5928 tcg_gen_shli_i32(tmp2, tmp2, 16);
5929 tcg_gen_or_i32(tmp2, tmp2, tmp);
5930 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5931 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5932 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5933 neon_store_reg(rd, 0, tmp2);
7d1b0095 5934 tmp2 = tcg_temp_new_i32();
2d981da7 5935 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5936 tcg_gen_shli_i32(tmp2, tmp2, 16);
5937 tcg_gen_or_i32(tmp2, tmp2, tmp);
5938 neon_store_reg(rd, 1, tmp2);
7d1b0095 5939 tcg_temp_free_i32(tmp);
60011498 5940 break;
600b828c 5941 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5942 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5943 q || (rd & 1)) {
5944 return 1;
5945 }
7d1b0095 5946 tmp3 = tcg_temp_new_i32();
60011498
PB
5947 tmp = neon_load_reg(rm, 0);
5948 tmp2 = neon_load_reg(rm, 1);
5949 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5950 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5951 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5952 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5953 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5954 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5955 tcg_temp_free_i32(tmp);
60011498 5956 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5957 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5958 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5959 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5960 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5961 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5962 tcg_temp_free_i32(tmp2);
5963 tcg_temp_free_i32(tmp3);
60011498 5964 break;
9ee6e8bb
PB
5965 default:
5966 elementwise:
5967 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5968 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5969 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5970 neon_reg_offset(rm, pass));
39d5492a 5971 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5972 } else {
dd8fbd78 5973 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5974 }
5975 switch (op) {
600b828c 5976 case NEON_2RM_VREV32:
9ee6e8bb 5977 switch (size) {
dd8fbd78
FN
5978 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5979 case 1: gen_swap_half(tmp); break;
600b828c 5980 default: abort();
9ee6e8bb
PB
5981 }
5982 break;
600b828c 5983 case NEON_2RM_VREV16:
dd8fbd78 5984 gen_rev16(tmp);
9ee6e8bb 5985 break;
600b828c 5986 case NEON_2RM_VCLS:
9ee6e8bb 5987 switch (size) {
dd8fbd78
FN
5988 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5989 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5990 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 5991 default: abort();
9ee6e8bb
PB
5992 }
5993 break;
600b828c 5994 case NEON_2RM_VCLZ:
9ee6e8bb 5995 switch (size) {
dd8fbd78
FN
5996 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5997 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5998 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 5999 default: abort();
9ee6e8bb
PB
6000 }
6001 break;
600b828c 6002 case NEON_2RM_VCNT:
dd8fbd78 6003 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6004 break;
600b828c 6005 case NEON_2RM_VMVN:
dd8fbd78 6006 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6007 break;
600b828c 6008 case NEON_2RM_VQABS:
9ee6e8bb 6009 switch (size) {
02da0b2d
PM
6010 case 0:
6011 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6012 break;
6013 case 1:
6014 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6015 break;
6016 case 2:
6017 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6018 break;
600b828c 6019 default: abort();
9ee6e8bb
PB
6020 }
6021 break;
600b828c 6022 case NEON_2RM_VQNEG:
9ee6e8bb 6023 switch (size) {
02da0b2d
PM
6024 case 0:
6025 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6026 break;
6027 case 1:
6028 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6029 break;
6030 case 2:
6031 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6032 break;
600b828c 6033 default: abort();
9ee6e8bb
PB
6034 }
6035 break;
600b828c 6036 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6037 tmp2 = tcg_const_i32(0);
9ee6e8bb 6038 switch(size) {
dd8fbd78
FN
6039 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6040 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6041 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6042 default: abort();
9ee6e8bb 6043 }
39d5492a 6044 tcg_temp_free_i32(tmp2);
600b828c 6045 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6046 tcg_gen_not_i32(tmp, tmp);
600b828c 6047 }
9ee6e8bb 6048 break;
600b828c 6049 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6050 tmp2 = tcg_const_i32(0);
9ee6e8bb 6051 switch(size) {
dd8fbd78
FN
6052 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6053 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6054 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6055 default: abort();
9ee6e8bb 6056 }
39d5492a 6057 tcg_temp_free_i32(tmp2);
600b828c 6058 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6059 tcg_gen_not_i32(tmp, tmp);
600b828c 6060 }
9ee6e8bb 6061 break;
600b828c 6062 case NEON_2RM_VCEQ0:
dd8fbd78 6063 tmp2 = tcg_const_i32(0);
9ee6e8bb 6064 switch(size) {
dd8fbd78
FN
6065 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6066 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6067 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6068 default: abort();
9ee6e8bb 6069 }
39d5492a 6070 tcg_temp_free_i32(tmp2);
9ee6e8bb 6071 break;
600b828c 6072 case NEON_2RM_VABS:
9ee6e8bb 6073 switch(size) {
dd8fbd78
FN
6074 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6075 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6076 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6077 default: abort();
9ee6e8bb
PB
6078 }
6079 break;
600b828c 6080 case NEON_2RM_VNEG:
dd8fbd78
FN
6081 tmp2 = tcg_const_i32(0);
6082 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6083 tcg_temp_free_i32(tmp2);
9ee6e8bb 6084 break;
600b828c 6085 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6086 {
6087 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6088 tmp2 = tcg_const_i32(0);
aa47cfdd 6089 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6090 tcg_temp_free_i32(tmp2);
aa47cfdd 6091 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6092 break;
aa47cfdd 6093 }
600b828c 6094 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6095 {
6096 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6097 tmp2 = tcg_const_i32(0);
aa47cfdd 6098 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6099 tcg_temp_free_i32(tmp2);
aa47cfdd 6100 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6101 break;
aa47cfdd 6102 }
600b828c 6103 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6104 {
6105 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6106 tmp2 = tcg_const_i32(0);
aa47cfdd 6107 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6108 tcg_temp_free_i32(tmp2);
aa47cfdd 6109 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6110 break;
aa47cfdd 6111 }
600b828c 6112 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6113 {
6114 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6115 tmp2 = tcg_const_i32(0);
aa47cfdd 6116 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6117 tcg_temp_free_i32(tmp2);
aa47cfdd 6118 tcg_temp_free_ptr(fpstatus);
0e326109 6119 break;
aa47cfdd 6120 }
600b828c 6121 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6122 {
6123 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6124 tmp2 = tcg_const_i32(0);
aa47cfdd 6125 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6126 tcg_temp_free_i32(tmp2);
aa47cfdd 6127 tcg_temp_free_ptr(fpstatus);
0e326109 6128 break;
aa47cfdd 6129 }
600b828c 6130 case NEON_2RM_VABS_F:
4373f3ce 6131 gen_vfp_abs(0);
9ee6e8bb 6132 break;
600b828c 6133 case NEON_2RM_VNEG_F:
4373f3ce 6134 gen_vfp_neg(0);
9ee6e8bb 6135 break;
600b828c 6136 case NEON_2RM_VSWP:
dd8fbd78
FN
6137 tmp2 = neon_load_reg(rd, pass);
6138 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6139 break;
600b828c 6140 case NEON_2RM_VTRN:
dd8fbd78 6141 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6142 switch (size) {
dd8fbd78
FN
6143 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6144 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6145 default: abort();
9ee6e8bb 6146 }
dd8fbd78 6147 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6148 break;
600b828c 6149 case NEON_2RM_VRECPE:
dd8fbd78 6150 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6151 break;
600b828c 6152 case NEON_2RM_VRSQRTE:
dd8fbd78 6153 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6154 break;
600b828c 6155 case NEON_2RM_VRECPE_F:
4373f3ce 6156 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6157 break;
600b828c 6158 case NEON_2RM_VRSQRTE_F:
4373f3ce 6159 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6160 break;
600b828c 6161 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6162 gen_vfp_sito(0, 1);
9ee6e8bb 6163 break;
600b828c 6164 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6165 gen_vfp_uito(0, 1);
9ee6e8bb 6166 break;
600b828c 6167 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6168 gen_vfp_tosiz(0, 1);
9ee6e8bb 6169 break;
600b828c 6170 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6171 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6172 break;
6173 default:
600b828c
PM
6174 /* Reserved op values were caught by the
6175 * neon_2rm_sizes[] check earlier.
6176 */
6177 abort();
9ee6e8bb 6178 }
600b828c 6179 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6180 tcg_gen_st_f32(cpu_F0s, cpu_env,
6181 neon_reg_offset(rd, pass));
9ee6e8bb 6182 } else {
dd8fbd78 6183 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6184 }
6185 }
6186 break;
6187 }
6188 } else if ((insn & (1 << 10)) == 0) {
6189 /* VTBL, VTBX. */
56907d77
PM
6190 int n = ((insn >> 8) & 3) + 1;
6191 if ((rn + n) > 32) {
6192 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6193 * helper function running off the end of the register file.
6194 */
6195 return 1;
6196 }
6197 n <<= 3;
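            /* e.g. a length field of 1 selects two source registers: the
             * rn + n > 32 check above sees n == 2, and the helper is then
             * passed n == 16, the table size in bytes, so tables that would
             * run past D31 are rejected.
             */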
9ee6e8bb 6198 if (insn & (1 << 6)) {
8f8e3aa4 6199 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6200 } else {
7d1b0095 6201 tmp = tcg_temp_new_i32();
8f8e3aa4 6202 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6203 }
8f8e3aa4 6204 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6205 tmp4 = tcg_const_i32(rn);
6206 tmp5 = tcg_const_i32(n);
9ef39277 6207 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6208 tcg_temp_free_i32(tmp);
9ee6e8bb 6209 if (insn & (1 << 6)) {
8f8e3aa4 6210 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6211 } else {
7d1b0095 6212 tmp = tcg_temp_new_i32();
8f8e3aa4 6213 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6214 }
8f8e3aa4 6215 tmp3 = neon_load_reg(rm, 1);
9ef39277 6216 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6217 tcg_temp_free_i32(tmp5);
6218 tcg_temp_free_i32(tmp4);
8f8e3aa4 6219 neon_store_reg(rd, 0, tmp2);
3018f259 6220 neon_store_reg(rd, 1, tmp3);
7d1b0095 6221 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6222 } else if ((insn & 0x380) == 0) {
6223 /* VDUP */
133da6aa
JR
6224 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6225 return 1;
6226 }
9ee6e8bb 6227 if (insn & (1 << 19)) {
dd8fbd78 6228 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6229 } else {
dd8fbd78 6230 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6231 }
6232 if (insn & (1 << 16)) {
dd8fbd78 6233 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6234 } else if (insn & (1 << 17)) {
6235 if ((insn >> 18) & 1)
dd8fbd78 6236 gen_neon_dup_high16(tmp);
9ee6e8bb 6237 else
dd8fbd78 6238 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6239 }
6240 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6241 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6242 tcg_gen_mov_i32(tmp2, tmp);
6243 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6244 }
7d1b0095 6245 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6246 } else {
6247 return 1;
6248 }
6249 }
6250 }
6251 return 0;
6252}
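
/* A minimal host-side sketch of what the non-quad "Extract" (VEXT) path
 * above computes: the top (8 - imm) bytes of Dn followed by the low imm
 * bytes of Dm, for 1 <= imm <= 7 (imm == 0 is the plain "copy Dn" case
 * handled separately).  vext_d_sketch is a hypothetical name used only for
 * illustration; it is not part of the translator.
 */
static inline uint64_t vext_d_sketch(uint64_t dn, uint64_t dm, unsigned imm)
{
    return (dn >> (imm * 8)) | (dm << (64 - imm * 8));
}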
6253
0ecb72a5 6254static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6255{
4b6a83fb
PM
6256 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6257 const ARMCPRegInfo *ri;
6258 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6259
6260 cpnum = (insn >> 8) & 0xf;
6261 if (arm_feature(env, ARM_FEATURE_XSCALE)
6262 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6263 return 1;
6264
4b6a83fb 6265 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6266 switch (cpnum) {
6267 case 0:
6268 case 1:
6269 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6270 return disas_iwmmxt_insn(env, s, insn);
6271 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6272 return disas_dsp_insn(env, s, insn);
6273 }
6274 return 1;
6275 case 10:
6276 case 11:
 6277 return disas_vfp_insn(env, s, insn);
4b6a83fb
PM
6278 default:
6279 break;
6280 }
6281
6282 /* Otherwise treat as a generic register access */
6283 is64 = (insn & (1 << 25)) == 0;
6284 if (!is64 && ((insn & (1 << 4)) == 0)) {
6285 /* cdp */
6286 return 1;
6287 }
6288
6289 crm = insn & 0xf;
6290 if (is64) {
6291 crn = 0;
6292 opc1 = (insn >> 4) & 0xf;
6293 opc2 = 0;
6294 rt2 = (insn >> 16) & 0xf;
6295 } else {
6296 crn = (insn >> 16) & 0xf;
6297 opc1 = (insn >> 21) & 7;
6298 opc2 = (insn >> 5) & 7;
6299 rt2 = 0;
6300 }
6301 isread = (insn >> 20) & 1;
6302 rt = (insn >> 12) & 0xf;
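    /* As a worked decode of the extraction above (illustration only):
     * 0xee100f10, i.e. MRC p15, 0, r0, c0, c0, 0 (a MIDR read), yields
     * cpnum = 15, is64 = 0, crn = 0, crm = 0, opc1 = 0, opc2 = 0,
     * isread = 1 and rt = 0, and is then looked up as a 32-bit read of
     * that cp15 register.
     */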
6303
6304 ri = get_arm_cp_reginfo(cpu,
6305 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6306 if (ri) {
6307 /* Check access permissions */
6308 if (!cp_access_ok(env, ri, isread)) {
6309 return 1;
6310 }
6311
6312 /* Handle special cases first */
6313 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6314 case ARM_CP_NOP:
6315 return 0;
6316 case ARM_CP_WFI:
6317 if (isread) {
6318 return 1;
6319 }
6320 gen_set_pc_im(s->pc);
6321 s->is_jmp = DISAS_WFI;
2bee5105 6322 return 0;
4b6a83fb
PM
6323 default:
6324 break;
6325 }
6326
6327 if (isread) {
6328 /* Read */
6329 if (is64) {
6330 TCGv_i64 tmp64;
6331 TCGv_i32 tmp;
6332 if (ri->type & ARM_CP_CONST) {
6333 tmp64 = tcg_const_i64(ri->resetvalue);
6334 } else if (ri->readfn) {
6335 TCGv_ptr tmpptr;
6336 gen_set_pc_im(s->pc);
6337 tmp64 = tcg_temp_new_i64();
6338 tmpptr = tcg_const_ptr(ri);
6339 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6340 tcg_temp_free_ptr(tmpptr);
6341 } else {
6342 tmp64 = tcg_temp_new_i64();
6343 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6344 }
6345 tmp = tcg_temp_new_i32();
6346 tcg_gen_trunc_i64_i32(tmp, tmp64);
6347 store_reg(s, rt, tmp);
6348 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6349 tmp = tcg_temp_new_i32();
4b6a83fb 6350 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6351 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6352 store_reg(s, rt2, tmp);
6353 } else {
39d5492a 6354 TCGv_i32 tmp;
4b6a83fb
PM
6355 if (ri->type & ARM_CP_CONST) {
6356 tmp = tcg_const_i32(ri->resetvalue);
6357 } else if (ri->readfn) {
6358 TCGv_ptr tmpptr;
6359 gen_set_pc_im(s->pc);
6360 tmp = tcg_temp_new_i32();
6361 tmpptr = tcg_const_ptr(ri);
6362 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6363 tcg_temp_free_ptr(tmpptr);
6364 } else {
6365 tmp = load_cpu_offset(ri->fieldoffset);
6366 }
6367 if (rt == 15) {
 6368 /* A destination register of r15 for a 32-bit read sets
 6369 * the condition codes (NZCV) from the top 4 bits of the value.
 6370 */
6371 gen_set_nzcv(tmp);
6372 tcg_temp_free_i32(tmp);
6373 } else {
6374 store_reg(s, rt, tmp);
6375 }
6376 }
6377 } else {
6378 /* Write */
6379 if (ri->type & ARM_CP_CONST) {
6380 /* If not forbidden by access permissions, treat as WI */
6381 return 0;
6382 }
6383
6384 if (is64) {
39d5492a 6385 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
6386 TCGv_i64 tmp64 = tcg_temp_new_i64();
6387 tmplo = load_reg(s, rt);
6388 tmphi = load_reg(s, rt2);
6389 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6390 tcg_temp_free_i32(tmplo);
6391 tcg_temp_free_i32(tmphi);
6392 if (ri->writefn) {
6393 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6394 gen_set_pc_im(s->pc);
6395 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6396 tcg_temp_free_ptr(tmpptr);
6397 } else {
6398 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6399 }
6400 tcg_temp_free_i64(tmp64);
6401 } else {
6402 if (ri->writefn) {
39d5492a 6403 TCGv_i32 tmp;
4b6a83fb
PM
6404 TCGv_ptr tmpptr;
6405 gen_set_pc_im(s->pc);
6406 tmp = load_reg(s, rt);
6407 tmpptr = tcg_const_ptr(ri);
6408 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6409 tcg_temp_free_ptr(tmpptr);
6410 tcg_temp_free_i32(tmp);
6411 } else {
39d5492a 6412 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
6413 store_cpu_offset(tmp, ri->fieldoffset);
6414 }
6415 }
6416 /* We default to ending the TB on a coprocessor register write,
6417 * but allow this to be suppressed by the register definition
6418 * (usually only necessary to work around guest bugs).
6419 */
6420 if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6421 gen_lookup_tb(s);
6422 }
6423 }
6424 return 0;
6425 }
6426
4a9a539f 6427 return 1;
9ee6e8bb
PB
6428}
6429
5e3f878a
PB
6430
6431/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6432static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6433{
39d5492a 6434 TCGv_i32 tmp;
7d1b0095 6435 tmp = tcg_temp_new_i32();
5e3f878a
PB
6436 tcg_gen_trunc_i64_i32(tmp, val);
6437 store_reg(s, rlow, tmp);
7d1b0095 6438 tmp = tcg_temp_new_i32();
5e3f878a
PB
6439 tcg_gen_shri_i64(val, val, 32);
6440 tcg_gen_trunc_i64_i32(tmp, val);
6441 store_reg(s, rhigh, tmp);
6442}
6443
6444/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6445static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6446{
a7812ae4 6447 TCGv_i64 tmp;
39d5492a 6448 TCGv_i32 tmp2;
5e3f878a 6449
36aa55dc 6450 /* Load value and extend to 64 bits. */
a7812ae4 6451 tmp = tcg_temp_new_i64();
5e3f878a
PB
6452 tmp2 = load_reg(s, rlow);
6453 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6454 tcg_temp_free_i32(tmp2);
5e3f878a 6455 tcg_gen_add_i64(val, val, tmp);
b75263d6 6456 tcg_temp_free_i64(tmp);
5e3f878a
PB
6457}
6458
6459/* load and add a 64-bit value from a register pair. */
a7812ae4 6460static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6461{
a7812ae4 6462 TCGv_i64 tmp;
39d5492a
PM
6463 TCGv_i32 tmpl;
6464 TCGv_i32 tmph;
5e3f878a
PB
6465
6466 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6467 tmpl = load_reg(s, rlow);
6468 tmph = load_reg(s, rhigh);
a7812ae4 6469 tmp = tcg_temp_new_i64();
36aa55dc 6470 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6471 tcg_temp_free_i32(tmpl);
6472 tcg_temp_free_i32(tmph);
5e3f878a 6473 tcg_gen_add_i64(val, val, tmp);
b75263d6 6474 tcg_temp_free_i64(tmp);
5e3f878a
PB
6475}
6476
c9f10124 6477/* Set N and Z flags from hi|lo. */
39d5492a 6478static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6479{
c9f10124
RH
6480 tcg_gen_mov_i32(cpu_NF, hi);
6481 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6482}
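
/* With the flag representation used in this file, Z for the 64-bit result
 * is "ZF == 0", i.e. set only when both halves are zero, and N is bit 31
 * of NF, i.e. bit 63 of the 64-bit result.
 */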
6483
426f5abc
PB
6484/* Load/Store exclusive instructions are implemented by remembering
6485 the value/address loaded, and seeing if these are the same
b90372ad 6486 when the store is performed. This should be sufficient to implement
426f5abc
PB
6487 the architecturally mandated semantics, and avoids having to monitor
6488 regular stores.
6489
6490 In system emulation mode only one CPU will be running at once, so
6491 this sequence is effectively atomic. In user emulation mode we
6492 throw an exception and handle the atomic operation elsewhere. */
6493static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 6494 TCGv_i32 addr, int size)
426f5abc 6495{
94ee24e7 6496 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc
PB
6497
6498 switch (size) {
6499 case 0:
94ee24e7 6500 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6501 break;
6502 case 1:
94ee24e7 6503 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6504 break;
6505 case 2:
6506 case 3:
94ee24e7 6507 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6508 break;
6509 default:
6510 abort();
6511 }
6512 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6513 store_reg(s, rt, tmp);
6514 if (size == 3) {
39d5492a 6515 TCGv_i32 tmp2 = tcg_temp_new_i32();
2c9adbda 6516 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7
PM
6517 tmp = tcg_temp_new_i32();
6518 tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6519 tcg_temp_free_i32(tmp2);
426f5abc
PB
6520 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6521 store_reg(s, rt2, tmp);
6522 }
6523 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6524}
6525
6526static void gen_clrex(DisasContext *s)
6527{
6528 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6529}
6530
6531#ifdef CONFIG_USER_ONLY
6532static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6533 TCGv_i32 addr, int size)
426f5abc
PB
6534{
6535 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6536 tcg_gen_movi_i32(cpu_exclusive_info,
6537 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6538 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6539}
6540#else
6541static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6542 TCGv_i32 addr, int size)
426f5abc 6543{
39d5492a 6544 TCGv_i32 tmp;
426f5abc
PB
6545 int done_label;
6546 int fail_label;
6547
6548 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6549 [addr] = {Rt};
6550 {Rd} = 0;
6551 } else {
6552 {Rd} = 1;
6553 } */
6554 fail_label = gen_new_label();
6555 done_label = gen_new_label();
6556 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
94ee24e7 6557 tmp = tcg_temp_new_i32();
426f5abc
PB
6558 switch (size) {
6559 case 0:
94ee24e7 6560 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6561 break;
6562 case 1:
94ee24e7 6563 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6564 break;
6565 case 2:
6566 case 3:
94ee24e7 6567 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6568 break;
6569 default:
6570 abort();
6571 }
6572 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6573 tcg_temp_free_i32(tmp);
426f5abc 6574 if (size == 3) {
39d5492a 6575 TCGv_i32 tmp2 = tcg_temp_new_i32();
426f5abc 6576 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7
PM
6577 tmp = tcg_temp_new_i32();
6578 tcg_gen_qemu_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6579 tcg_temp_free_i32(tmp2);
426f5abc 6580 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6581 tcg_temp_free_i32(tmp);
426f5abc
PB
6582 }
6583 tmp = load_reg(s, rt);
6584 switch (size) {
6585 case 0:
94ee24e7 6586 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
426f5abc
PB
6587 break;
6588 case 1:
94ee24e7 6589 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
426f5abc
PB
6590 break;
6591 case 2:
6592 case 3:
94ee24e7 6593 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
426f5abc
PB
6594 break;
6595 default:
6596 abort();
6597 }
94ee24e7 6598 tcg_temp_free_i32(tmp);
426f5abc
PB
6599 if (size == 3) {
6600 tcg_gen_addi_i32(addr, addr, 4);
6601 tmp = load_reg(s, rt2);
94ee24e7
PM
6602 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
6603 tcg_temp_free_i32(tmp);
426f5abc
PB
6604 }
6605 tcg_gen_movi_i32(cpu_R[rd], 0);
6606 tcg_gen_br(done_label);
6607 gen_set_label(fail_label);
6608 tcg_gen_movi_i32(cpu_R[rd], 1);
6609 gen_set_label(done_label);
6610 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6611}
6612#endif
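
/* A rough C model of the monitor these helpers emulate, for a single 32-bit
 * location.  The names model_ldrex/model_strex and the mon_addr/mon_val
 * parameters are illustrative stand-ins for cpu_exclusive_addr and
 * cpu_exclusive_val; this is a sketch, not code used by the translator.
 */
static inline uint32_t model_ldrex(uint32_t *mon_addr, uint32_t *mon_val,
                                   uint32_t *p)
{
    *mon_addr = (uint32_t)(uintptr_t)p;     /* remember the address ...   */
    *mon_val = *p;                          /* ... and the loaded value   */
    return *mon_val;
}

static inline int model_strex(uint32_t *mon_addr, uint32_t *mon_val,
                              uint32_t *p, uint32_t v)
{
    int fail = (*mon_addr != (uint32_t)(uintptr_t)p) || (*mon_val != *p);
    if (!fail) {
        *p = v;                             /* store goes through          */
    }
    *mon_addr = (uint32_t)-1;               /* monitor is cleared either way */
    return fail;                            /* Rd: 0 on success, 1 on fail  */
}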
6613
81465888
PM
6614/* gen_srs:
6616 * @s: DisasContext
6617 * @mode: mode field from insn (which stack to store to)
6618 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6619 * @writeback: true if writeback bit set
6620 *
6621 * Generate code for the SRS (Store Return State) insn.
6622 */
6623static void gen_srs(DisasContext *s,
6624 uint32_t mode, uint32_t amode, bool writeback)
6625{
6626 int32_t offset;
6627 TCGv_i32 addr = tcg_temp_new_i32();
6628 TCGv_i32 tmp = tcg_const_i32(mode);
6629 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6630 tcg_temp_free_i32(tmp);
6631 switch (amode) {
6632 case 0: /* DA */
6633 offset = -4;
6634 break;
6635 case 1: /* IA */
6636 offset = 0;
6637 break;
6638 case 2: /* DB */
6639 offset = -8;
6640 break;
6641 case 3: /* IB */
6642 offset = 4;
6643 break;
6644 default:
6645 abort();
6646 }
6647 tcg_gen_addi_i32(addr, addr, offset);
6648 tmp = load_reg(s, 14);
5a839c0d
PM
6649 tcg_gen_qemu_st32(tmp, addr, 0);
6650 tcg_temp_free_i32(tmp);
81465888
PM
6651 tmp = load_cpu_field(spsr);
6652 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d
PM
6653 tcg_gen_qemu_st32(tmp, addr, 0);
6654 tcg_temp_free_i32(tmp);
81465888
PM
6655 if (writeback) {
6656 switch (amode) {
6657 case 0:
6658 offset = -8;
6659 break;
6660 case 1:
6661 offset = 4;
6662 break;
6663 case 2:
6664 offset = -4;
6665 break;
6666 case 3:
6667 offset = 0;
6668 break;
6669 default:
6670 abort();
6671 }
6672 tcg_gen_addi_i32(addr, addr, offset);
6673 tmp = tcg_const_i32(mode);
6674 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6675 tcg_temp_free_i32(tmp);
6676 }
6677 tcg_temp_free_i32(addr);
6678}
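
/* Worked example of the addressing above: SRSDB (amode == 2) stores the LR
 * at banked_SP - 8 and the SPSR at banked_SP - 4, and with writeback leaves
 * the banked SP at banked_SP - 8; SRSIA (amode == 1) uses banked_SP and
 * banked_SP + 4 and writes back banked_SP + 8.
 */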
6679
0ecb72a5 6680static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6681{
6682 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
6683 TCGv_i32 tmp;
6684 TCGv_i32 tmp2;
6685 TCGv_i32 tmp3;
6686 TCGv_i32 addr;
a7812ae4 6687 TCGv_i64 tmp64;
9ee6e8bb 6688
d31dd73e 6689 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6690 s->pc += 4;
6691
6692 /* M variants do not implement ARM mode. */
6693 if (IS_M(env))
6694 goto illegal_op;
6695 cond = insn >> 28;
6696 if (cond == 0xf){
be5e7a76
DES
6697 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6698 * choose to UNDEF. In ARMv5 and above the space is used
6699 * for miscellaneous unconditional instructions.
6700 */
6701 ARCH(5);
6702
9ee6e8bb
PB
6703 /* Unconditional instructions. */
6704 if (((insn >> 25) & 7) == 1) {
6705 /* NEON Data processing. */
6706 if (!arm_feature(env, ARM_FEATURE_NEON))
6707 goto illegal_op;
6708
6709 if (disas_neon_data_insn(env, s, insn))
6710 goto illegal_op;
6711 return;
6712 }
6713 if ((insn & 0x0f100000) == 0x04000000) {
6714 /* NEON load/store. */
6715 if (!arm_feature(env, ARM_FEATURE_NEON))
6716 goto illegal_op;
6717
6718 if (disas_neon_ls_insn(env, s, insn))
6719 goto illegal_op;
6720 return;
6721 }
3d185e5d
PM
6722 if (((insn & 0x0f30f000) == 0x0510f000) ||
6723 ((insn & 0x0f30f010) == 0x0710f000)) {
6724 if ((insn & (1 << 22)) == 0) {
6725 /* PLDW; v7MP */
6726 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6727 goto illegal_op;
6728 }
6729 }
6730 /* Otherwise PLD; v5TE+ */
be5e7a76 6731 ARCH(5TE);
3d185e5d
PM
6732 return;
6733 }
6734 if (((insn & 0x0f70f000) == 0x0450f000) ||
6735 ((insn & 0x0f70f010) == 0x0650f000)) {
6736 ARCH(7);
6737 return; /* PLI; V7 */
6738 }
6739 if (((insn & 0x0f700000) == 0x04100000) ||
6740 ((insn & 0x0f700010) == 0x06100000)) {
6741 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6742 goto illegal_op;
6743 }
6744 return; /* v7MP: Unallocated memory hint: must NOP */
6745 }
6746
6747 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6748 ARCH(6);
6749 /* setend */
10962fd5
PM
6750 if (((insn >> 9) & 1) != s->bswap_code) {
6751 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6752 goto illegal_op;
6753 }
6754 return;
6755 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6756 switch ((insn >> 4) & 0xf) {
6757 case 1: /* clrex */
6758 ARCH(6K);
426f5abc 6759 gen_clrex(s);
9ee6e8bb
PB
6760 return;
6761 case 4: /* dsb */
6762 case 5: /* dmb */
6763 case 6: /* isb */
6764 ARCH(7);
6765 /* We don't emulate caches so these are a no-op. */
6766 return;
6767 default:
6768 goto illegal_op;
6769 }
6770 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6771 /* srs */
81465888 6772 if (IS_USER(s)) {
9ee6e8bb 6773 goto illegal_op;
9ee6e8bb 6774 }
81465888
PM
6775 ARCH(6);
6776 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 6777 return;
ea825eee 6778 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6779 /* rfe */
c67b6b71 6780 int32_t offset;
9ee6e8bb
PB
6781 if (IS_USER(s))
6782 goto illegal_op;
6783 ARCH(6);
6784 rn = (insn >> 16) & 0xf;
b0109805 6785 addr = load_reg(s, rn);
9ee6e8bb
PB
6786 i = (insn >> 23) & 3;
6787 switch (i) {
b0109805 6788 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6789 case 1: offset = 0; break; /* IA */
6790 case 2: offset = -8; break; /* DB */
b0109805 6791 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6792 default: abort();
6793 }
6794 if (offset)
b0109805
PB
6795 tcg_gen_addi_i32(addr, addr, offset);
6796 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d
PM
6797 tmp = tcg_temp_new_i32();
6798 tcg_gen_qemu_ld32u(tmp, addr, 0);
b0109805 6799 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d
PM
6800 tmp2 = tcg_temp_new_i32();
 6801 tcg_gen_qemu_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
6802 if (insn & (1 << 21)) {
6803 /* Base writeback. */
6804 switch (i) {
b0109805 6805 case 0: offset = -8; break;
c67b6b71
FN
6806 case 1: offset = 4; break;
6807 case 2: offset = -4; break;
b0109805 6808 case 3: offset = 0; break;
9ee6e8bb
PB
6809 default: abort();
6810 }
6811 if (offset)
b0109805
PB
6812 tcg_gen_addi_i32(addr, addr, offset);
6813 store_reg(s, rn, addr);
6814 } else {
7d1b0095 6815 tcg_temp_free_i32(addr);
9ee6e8bb 6816 }
b0109805 6817 gen_rfe(s, tmp, tmp2);
c67b6b71 6818 return;
9ee6e8bb
PB
6819 } else if ((insn & 0x0e000000) == 0x0a000000) {
6820 /* branch link and change to thumb (blx <offset>) */
6821 int32_t offset;
6822
6823 val = (uint32_t)s->pc;
7d1b0095 6824 tmp = tcg_temp_new_i32();
d9ba4830
PB
6825 tcg_gen_movi_i32(tmp, val);
6826 store_reg(s, 14, tmp);
9ee6e8bb
PB
6827 /* Sign-extend the 24-bit offset */
6828 offset = (((int32_t)insn) << 8) >> 8;
6829 /* offset * 4 + bit24 * 2 + (thumb bit) */
6830 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6831 /* pipeline offset */
6832 val += 4;
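            /* For example, with imm24 == 0 and the H bit clear the target
             * is this instruction's own address + 8 with bit 0 set (a Thumb
             * continuation at PC + 8), while r14 holds the address of the
             * following ARM instruction.
             */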
be5e7a76 6833 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6834 gen_bx_im(s, val);
9ee6e8bb
PB
6835 return;
6836 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6837 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6838 /* iWMMXt register transfer. */
6839 if (env->cp15.c15_cpar & (1 << 1))
6840 if (!disas_iwmmxt_insn(env, s, insn))
6841 return;
6842 }
6843 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6844 /* Coprocessor double register transfer. */
be5e7a76 6845 ARCH(5TE);
9ee6e8bb
PB
6846 } else if ((insn & 0x0f000010) == 0x0e000010) {
6847 /* Additional coprocessor register transfer. */
7997d92f 6848 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6849 uint32_t mask;
6850 uint32_t val;
6851 /* cps (privileged) */
6852 if (IS_USER(s))
6853 return;
6854 mask = val = 0;
6855 if (insn & (1 << 19)) {
6856 if (insn & (1 << 8))
6857 mask |= CPSR_A;
6858 if (insn & (1 << 7))
6859 mask |= CPSR_I;
6860 if (insn & (1 << 6))
6861 mask |= CPSR_F;
6862 if (insn & (1 << 18))
6863 val |= mask;
6864 }
7997d92f 6865 if (insn & (1 << 17)) {
9ee6e8bb
PB
6866 mask |= CPSR_M;
6867 val |= (insn & 0x1f);
6868 }
6869 if (mask) {
2fbac54b 6870 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6871 }
6872 return;
6873 }
6874 goto illegal_op;
6875 }
6876 if (cond != 0xe) {
6877 /* if not always execute, we generate a conditional jump to
6878 next instruction */
6879 s->condlabel = gen_new_label();
d9ba4830 6880 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6881 s->condjmp = 1;
6882 }
6883 if ((insn & 0x0f900000) == 0x03000000) {
6884 if ((insn & (1 << 21)) == 0) {
6885 ARCH(6T2);
6886 rd = (insn >> 12) & 0xf;
6887 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
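                /* i.e. val is the imm4:imm12 halfword: imm4 == 0xa in
                 * bits [19:16] with imm12 == 0xbcd gives val == 0xabcd.
                 */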
6888 if ((insn & (1 << 22)) == 0) {
6889 /* MOVW */
7d1b0095 6890 tmp = tcg_temp_new_i32();
5e3f878a 6891 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6892 } else {
6893 /* MOVT */
5e3f878a 6894 tmp = load_reg(s, rd);
86831435 6895 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6896 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6897 }
5e3f878a 6898 store_reg(s, rd, tmp);
9ee6e8bb
PB
6899 } else {
6900 if (((insn >> 12) & 0xf) != 0xf)
6901 goto illegal_op;
6902 if (((insn >> 16) & 0xf) == 0) {
6903 gen_nop_hint(s, insn & 0xff);
6904 } else {
6905 /* CPSR = immediate */
6906 val = insn & 0xff;
6907 shift = ((insn >> 8) & 0xf) * 2;
6908 if (shift)
6909 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6910 i = ((insn & (1 << 22)) != 0);
2fbac54b 6911 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6912 goto illegal_op;
6913 }
6914 }
6915 } else if ((insn & 0x0f900000) == 0x01000000
6916 && (insn & 0x00000090) != 0x00000090) {
6917 /* miscellaneous instructions */
6918 op1 = (insn >> 21) & 3;
6919 sh = (insn >> 4) & 0xf;
6920 rm = insn & 0xf;
6921 switch (sh) {
6922 case 0x0: /* move program status register */
6923 if (op1 & 1) {
6924 /* PSR = reg */
2fbac54b 6925 tmp = load_reg(s, rm);
9ee6e8bb 6926 i = ((op1 & 2) != 0);
2fbac54b 6927 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6928 goto illegal_op;
6929 } else {
6930 /* reg = PSR */
6931 rd = (insn >> 12) & 0xf;
6932 if (op1 & 2) {
6933 if (IS_USER(s))
6934 goto illegal_op;
d9ba4830 6935 tmp = load_cpu_field(spsr);
9ee6e8bb 6936 } else {
7d1b0095 6937 tmp = tcg_temp_new_i32();
9ef39277 6938 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6939 }
d9ba4830 6940 store_reg(s, rd, tmp);
9ee6e8bb
PB
6941 }
6942 break;
6943 case 0x1:
6944 if (op1 == 1) {
6945 /* branch/exchange thumb (bx). */
be5e7a76 6946 ARCH(4T);
d9ba4830
PB
6947 tmp = load_reg(s, rm);
6948 gen_bx(s, tmp);
9ee6e8bb
PB
6949 } else if (op1 == 3) {
6950 /* clz */
be5e7a76 6951 ARCH(5);
9ee6e8bb 6952 rd = (insn >> 12) & 0xf;
1497c961
PB
6953 tmp = load_reg(s, rm);
6954 gen_helper_clz(tmp, tmp);
6955 store_reg(s, rd, tmp);
9ee6e8bb
PB
6956 } else {
6957 goto illegal_op;
6958 }
6959 break;
6960 case 0x2:
6961 if (op1 == 1) {
6962 ARCH(5J); /* bxj */
6963 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6964 tmp = load_reg(s, rm);
6965 gen_bx(s, tmp);
9ee6e8bb
PB
6966 } else {
6967 goto illegal_op;
6968 }
6969 break;
6970 case 0x3:
6971 if (op1 != 1)
6972 goto illegal_op;
6973
be5e7a76 6974 ARCH(5);
9ee6e8bb 6975 /* branch link/exchange thumb (blx) */
d9ba4830 6976 tmp = load_reg(s, rm);
7d1b0095 6977 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6978 tcg_gen_movi_i32(tmp2, s->pc);
6979 store_reg(s, 14, tmp2);
6980 gen_bx(s, tmp);
9ee6e8bb
PB
6981 break;
6982 case 0x5: /* saturating add/subtract */
be5e7a76 6983 ARCH(5TE);
9ee6e8bb
PB
6984 rd = (insn >> 12) & 0xf;
6985 rn = (insn >> 16) & 0xf;
b40d0353 6986 tmp = load_reg(s, rm);
5e3f878a 6987 tmp2 = load_reg(s, rn);
9ee6e8bb 6988 if (op1 & 2)
9ef39277 6989 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6990 if (op1 & 1)
9ef39277 6991 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6992 else
9ef39277 6993 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6994 tcg_temp_free_i32(tmp2);
5e3f878a 6995 store_reg(s, rd, tmp);
9ee6e8bb 6996 break;
49e14940
AL
6997 case 7:
6998 /* SMC instruction (op1 == 3)
6999 and undefined instructions (op1 == 0 || op1 == 2)
7000 will trap */
7001 if (op1 != 1) {
7002 goto illegal_op;
7003 }
7004 /* bkpt */
be5e7a76 7005 ARCH(5);
bc4a0de0 7006 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7007 break;
7008 case 0x8: /* signed multiply */
7009 case 0xa:
7010 case 0xc:
7011 case 0xe:
be5e7a76 7012 ARCH(5TE);
9ee6e8bb
PB
7013 rs = (insn >> 8) & 0xf;
7014 rn = (insn >> 12) & 0xf;
7015 rd = (insn >> 16) & 0xf;
7016 if (op1 == 1) {
7017 /* (32 * 16) >> 16 */
5e3f878a
PB
7018 tmp = load_reg(s, rm);
7019 tmp2 = load_reg(s, rs);
9ee6e8bb 7020 if (sh & 4)
5e3f878a 7021 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7022 else
5e3f878a 7023 gen_sxth(tmp2);
a7812ae4
PB
7024 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7025 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7026 tmp = tcg_temp_new_i32();
a7812ae4 7027 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7028 tcg_temp_free_i64(tmp64);
9ee6e8bb 7029 if ((sh & 2) == 0) {
5e3f878a 7030 tmp2 = load_reg(s, rn);
9ef39277 7031 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7032 tcg_temp_free_i32(tmp2);
9ee6e8bb 7033 }
5e3f878a 7034 store_reg(s, rd, tmp);
9ee6e8bb
PB
7035 } else {
7036 /* 16 * 16 */
5e3f878a
PB
7037 tmp = load_reg(s, rm);
7038 tmp2 = load_reg(s, rs);
7039 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7040 tcg_temp_free_i32(tmp2);
9ee6e8bb 7041 if (op1 == 2) {
a7812ae4
PB
7042 tmp64 = tcg_temp_new_i64();
7043 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7044 tcg_temp_free_i32(tmp);
a7812ae4
PB
7045 gen_addq(s, tmp64, rn, rd);
7046 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7047 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7048 } else {
7049 if (op1 == 0) {
5e3f878a 7050 tmp2 = load_reg(s, rn);
9ef39277 7051 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7052 tcg_temp_free_i32(tmp2);
9ee6e8bb 7053 }
5e3f878a 7054 store_reg(s, rd, tmp);
9ee6e8bb
PB
7055 }
7056 }
7057 break;
7058 default:
7059 goto illegal_op;
7060 }
7061 } else if (((insn & 0x0e000000) == 0 &&
7062 (insn & 0x00000090) != 0x90) ||
7063 ((insn & 0x0e000000) == (1 << 25))) {
7064 int set_cc, logic_cc, shiftop;
7065
7066 op1 = (insn >> 21) & 0xf;
7067 set_cc = (insn >> 20) & 1;
7068 logic_cc = table_logic_cc[op1] & set_cc;
7069
7070 /* data processing instruction */
7071 if (insn & (1 << 25)) {
7072 /* immediate operand */
7073 val = insn & 0xff;
7074 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7075 if (shift) {
9ee6e8bb 7076 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7077 }
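            /* The usual ARM modified-immediate rotation: e.g. imm8 0xff
             * with a rotate field of 0xc (rotate right by 24) encodes
             * the constant 0x0000ff00.
             */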
7d1b0095 7078 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7079 tcg_gen_movi_i32(tmp2, val);
7080 if (logic_cc && shift) {
7081 gen_set_CF_bit31(tmp2);
7082 }
9ee6e8bb
PB
7083 } else {
7084 /* register */
7085 rm = (insn) & 0xf;
e9bb4aa9 7086 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7087 shiftop = (insn >> 5) & 3;
7088 if (!(insn & (1 << 4))) {
7089 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7090 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7091 } else {
7092 rs = (insn >> 8) & 0xf;
8984bd2e 7093 tmp = load_reg(s, rs);
e9bb4aa9 7094 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7095 }
7096 }
7097 if (op1 != 0x0f && op1 != 0x0d) {
7098 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7099 tmp = load_reg(s, rn);
7100 } else {
39d5492a 7101 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7102 }
7103 rd = (insn >> 12) & 0xf;
7104 switch(op1) {
7105 case 0x00:
e9bb4aa9
JR
7106 tcg_gen_and_i32(tmp, tmp, tmp2);
7107 if (logic_cc) {
7108 gen_logic_CC(tmp);
7109 }
21aeb343 7110 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7111 break;
7112 case 0x01:
e9bb4aa9
JR
7113 tcg_gen_xor_i32(tmp, tmp, tmp2);
7114 if (logic_cc) {
7115 gen_logic_CC(tmp);
7116 }
21aeb343 7117 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7118 break;
7119 case 0x02:
7120 if (set_cc && rd == 15) {
7121 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7122 if (IS_USER(s)) {
9ee6e8bb 7123 goto illegal_op;
e9bb4aa9 7124 }
72485ec4 7125 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7126 gen_exception_return(s, tmp);
9ee6e8bb 7127 } else {
e9bb4aa9 7128 if (set_cc) {
72485ec4 7129 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7130 } else {
7131 tcg_gen_sub_i32(tmp, tmp, tmp2);
7132 }
21aeb343 7133 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7134 }
7135 break;
7136 case 0x03:
e9bb4aa9 7137 if (set_cc) {
72485ec4 7138 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7139 } else {
7140 tcg_gen_sub_i32(tmp, tmp2, tmp);
7141 }
21aeb343 7142 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7143 break;
7144 case 0x04:
e9bb4aa9 7145 if (set_cc) {
72485ec4 7146 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7147 } else {
7148 tcg_gen_add_i32(tmp, tmp, tmp2);
7149 }
21aeb343 7150 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7151 break;
7152 case 0x05:
e9bb4aa9 7153 if (set_cc) {
49b4c31e 7154 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7155 } else {
7156 gen_add_carry(tmp, tmp, tmp2);
7157 }
21aeb343 7158 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7159 break;
7160 case 0x06:
e9bb4aa9 7161 if (set_cc) {
2de68a49 7162 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7163 } else {
7164 gen_sub_carry(tmp, tmp, tmp2);
7165 }
21aeb343 7166 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7167 break;
7168 case 0x07:
e9bb4aa9 7169 if (set_cc) {
2de68a49 7170 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7171 } else {
7172 gen_sub_carry(tmp, tmp2, tmp);
7173 }
21aeb343 7174 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7175 break;
7176 case 0x08:
7177 if (set_cc) {
e9bb4aa9
JR
7178 tcg_gen_and_i32(tmp, tmp, tmp2);
7179 gen_logic_CC(tmp);
9ee6e8bb 7180 }
7d1b0095 7181 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7182 break;
7183 case 0x09:
7184 if (set_cc) {
e9bb4aa9
JR
7185 tcg_gen_xor_i32(tmp, tmp, tmp2);
7186 gen_logic_CC(tmp);
9ee6e8bb 7187 }
7d1b0095 7188 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7189 break;
7190 case 0x0a:
7191 if (set_cc) {
72485ec4 7192 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7193 }
7d1b0095 7194 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7195 break;
7196 case 0x0b:
7197 if (set_cc) {
72485ec4 7198 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7199 }
7d1b0095 7200 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7201 break;
7202 case 0x0c:
e9bb4aa9
JR
7203 tcg_gen_or_i32(tmp, tmp, tmp2);
7204 if (logic_cc) {
7205 gen_logic_CC(tmp);
7206 }
21aeb343 7207 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7208 break;
7209 case 0x0d:
7210 if (logic_cc && rd == 15) {
7211 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7212 if (IS_USER(s)) {
9ee6e8bb 7213 goto illegal_op;
e9bb4aa9
JR
7214 }
7215 gen_exception_return(s, tmp2);
9ee6e8bb 7216 } else {
e9bb4aa9
JR
7217 if (logic_cc) {
7218 gen_logic_CC(tmp2);
7219 }
21aeb343 7220 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7221 }
7222 break;
7223 case 0x0e:
f669df27 7224 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7225 if (logic_cc) {
7226 gen_logic_CC(tmp);
7227 }
21aeb343 7228 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7229 break;
7230 default:
7231 case 0x0f:
e9bb4aa9
JR
7232 tcg_gen_not_i32(tmp2, tmp2);
7233 if (logic_cc) {
7234 gen_logic_CC(tmp2);
7235 }
21aeb343 7236 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7237 break;
7238 }
e9bb4aa9 7239 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7240 tcg_temp_free_i32(tmp2);
e9bb4aa9 7241 }
9ee6e8bb
PB
7242 } else {
7243 /* other instructions */
7244 op1 = (insn >> 24) & 0xf;
7245 switch(op1) {
7246 case 0x0:
7247 case 0x1:
7248 /* multiplies, extra load/stores */
7249 sh = (insn >> 5) & 3;
7250 if (sh == 0) {
7251 if (op1 == 0x0) {
7252 rd = (insn >> 16) & 0xf;
7253 rn = (insn >> 12) & 0xf;
7254 rs = (insn >> 8) & 0xf;
7255 rm = (insn) & 0xf;
7256 op1 = (insn >> 20) & 0xf;
7257 switch (op1) {
7258 case 0: case 1: case 2: case 3: case 6:
7259 /* 32 bit mul */
5e3f878a
PB
7260 tmp = load_reg(s, rs);
7261 tmp2 = load_reg(s, rm);
7262 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7263 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7264 if (insn & (1 << 22)) {
7265 /* Subtract (mls) */
7266 ARCH(6T2);
5e3f878a
PB
7267 tmp2 = load_reg(s, rn);
7268 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7269 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7270 } else if (insn & (1 << 21)) {
7271 /* Add */
5e3f878a
PB
7272 tmp2 = load_reg(s, rn);
7273 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7274 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7275 }
7276 if (insn & (1 << 20))
5e3f878a
PB
7277 gen_logic_CC(tmp);
7278 store_reg(s, rd, tmp);
9ee6e8bb 7279 break;
8aac08b1
AJ
7280 case 4:
7281 /* 64 bit mul double accumulate (UMAAL) */
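/* UMAAL: the 64-bit product of rs and rm is accumulated with both
   32-bit values rn (low half) and rd (high half); this double
   accumulate cannot overflow 64 bits. */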
7282 ARCH(6);
7283 tmp = load_reg(s, rs);
7284 tmp2 = load_reg(s, rm);
7285 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7286 gen_addq_lo(s, tmp64, rn);
7287 gen_addq_lo(s, tmp64, rd);
7288 gen_storeq_reg(s, rn, rd, tmp64);
7289 tcg_temp_free_i64(tmp64);
7290 break;
7291 case 8: case 9: case 10: case 11:
7292 case 12: case 13: case 14: case 15:
7293 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7294 tmp = load_reg(s, rs);
7295 tmp2 = load_reg(s, rm);
8aac08b1 7296 if (insn & (1 << 22)) {
c9f10124 7297 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7298 } else {
c9f10124 7299 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7300 }
7301 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7302 TCGv_i32 al = load_reg(s, rn);
7303 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7304 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7305 tcg_temp_free_i32(al);
7306 tcg_temp_free_i32(ah);
9ee6e8bb 7307 }
8aac08b1 7308 if (insn & (1 << 20)) {
c9f10124 7309 gen_logicq_cc(tmp, tmp2);
8aac08b1 7310 }
c9f10124
RH
7311 store_reg(s, rn, tmp);
7312 store_reg(s, rd, tmp2);
9ee6e8bb 7313 break;
8aac08b1
AJ
7314 default:
7315 goto illegal_op;
9ee6e8bb
PB
7316 }
7317 } else {
7318 rn = (insn >> 16) & 0xf;
7319 rd = (insn >> 12) & 0xf;
7320 if (insn & (1 << 23)) {
7321 /* load/store exclusive */
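/* The last argument to gen_{load,store}_exclusive() is the log2 of the
   access size (0 = byte, 1 = halfword, 2 = word, 3 = doubleword); the
   single-register forms pass 15 in the second-register slot. */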
86753403
PB
7322 op1 = (insn >> 21) & 0x3;
7323 if (op1)
a47f43d2 7324 ARCH(6K);
86753403
PB
7325 else
7326 ARCH(6);
3174f8e9 7327 addr = tcg_temp_local_new_i32();
98a46317 7328 load_reg_var(s, addr, rn);
9ee6e8bb 7329 if (insn & (1 << 20)) {
86753403
PB
7330 switch (op1) {
7331 case 0: /* ldrex */
426f5abc 7332 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7333 break;
7334 case 1: /* ldrexd */
426f5abc 7335 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7336 break;
7337 case 2: /* ldrexb */
426f5abc 7338 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7339 break;
7340 case 3: /* ldrexh */
426f5abc 7341 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7342 break;
7343 default:
7344 abort();
7345 }
9ee6e8bb
PB
7346 } else {
7347 rm = insn & 0xf;
86753403
PB
7348 switch (op1) {
7349 case 0: /* strex */
426f5abc 7350 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7351 break;
7352 case 1: /* strexd */
502e64fe 7353 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7354 break;
7355 case 2: /* strexb */
426f5abc 7356 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7357 break;
7358 case 3: /* strexh */
426f5abc 7359 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7360 break;
7361 default:
7362 abort();
7363 }
9ee6e8bb 7364 }
39d5492a 7365 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7366 } else {
7367 /* SWP instruction */
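/* SWP{B} swaps a register with memory: the old memory value ends up
   in rd while the value from rm is written to [rn]. */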
7368 rm = (insn) & 0xf;
7369
8984bd2e
PB
7370 /* ??? This is not really atomic. However we know
7371 we never have multiple CPUs running in parallel,
7372 so it is good enough. */
7373 addr = load_reg(s, rn);
7374 tmp = load_reg(s, rm);
5a839c0d 7375 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7376 if (insn & (1 << 22)) {
5a839c0d
PM
7377 tcg_gen_qemu_ld8u(tmp2, addr, IS_USER(s));
7378 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7379 } else {
5a839c0d
PM
7380 tcg_gen_qemu_ld32u(tmp2, addr, IS_USER(s));
7381 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7382 }
5a839c0d 7383 tcg_temp_free_i32(tmp);
7d1b0095 7384 tcg_temp_free_i32(addr);
8984bd2e 7385 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7386 }
7387 }
7388 } else {
7389 int address_offset;
7390 int load;
7391 /* Misc load/store */
7392 rn = (insn >> 16) & 0xf;
7393 rd = (insn >> 12) & 0xf;
b0109805 7394 addr = load_reg(s, rn);
9ee6e8bb 7395 if (insn & (1 << 24))
b0109805 7396 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7397 address_offset = 0;
7398 if (insn & (1 << 20)) {
7399 /* load */
5a839c0d 7400 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7401 switch(sh) {
7402 case 1:
5a839c0d 7403 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7404 break;
7405 case 2:
5a839c0d 7406 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7407 break;
7408 default:
7409 case 3:
5a839c0d 7410 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7411 break;
7412 }
7413 load = 1;
7414 } else if (sh & 2) {
be5e7a76 7415 ARCH(5TE);
9ee6e8bb
PB
7416 /* doubleword */
7417 if (sh & 1) {
7418 /* store */
b0109805 7419 tmp = load_reg(s, rd);
5a839c0d
PM
7420 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7421 tcg_temp_free_i32(tmp);
b0109805
PB
7422 tcg_gen_addi_i32(addr, addr, 4);
7423 tmp = load_reg(s, rd + 1);
5a839c0d
PM
7424 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7425 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7426 load = 0;
7427 } else {
7428 /* load */
5a839c0d
PM
7429 tmp = tcg_temp_new_i32();
7430 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7431 store_reg(s, rd, tmp);
7432 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d
PM
7433 tmp = tcg_temp_new_i32();
7434 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7435 rd++;
7436 load = 1;
7437 }
7438 address_offset = -4;
7439 } else {
7440 /* store */
b0109805 7441 tmp = load_reg(s, rd);
5a839c0d
PM
7442 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
7443 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7444 load = 0;
7445 }
7446 /* Perform base writeback before the loaded value to
7447 ensure correct behavior with overlapping index registers.
7448 ldrd with base writeback is undefined if the
7449 destination and index registers overlap. */
7450 if (!(insn & (1 << 24))) {
b0109805
PB
7451 gen_add_datah_offset(s, insn, address_offset, addr);
7452 store_reg(s, rn, addr);
9ee6e8bb
PB
7453 } else if (insn & (1 << 21)) {
7454 if (address_offset)
b0109805
PB
7455 tcg_gen_addi_i32(addr, addr, address_offset);
7456 store_reg(s, rn, addr);
7457 } else {
7d1b0095 7458 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7459 }
7460 if (load) {
7461 /* Complete the load. */
b0109805 7462 store_reg(s, rd, tmp);
9ee6e8bb
PB
7463 }
7464 }
7465 break;
7466 case 0x4:
7467 case 0x5:
7468 goto do_ldst;
7469 case 0x6:
7470 case 0x7:
7471 if (insn & (1 << 4)) {
7472 ARCH(6);
7473 /* Armv6 Media instructions. */
7474 rm = insn & 0xf;
7475 rn = (insn >> 16) & 0xf;
2c0262af 7476 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7477 rs = (insn >> 8) & 0xf;
7478 switch ((insn >> 23) & 3) {
7479 case 0: /* Parallel add/subtract. */
7480 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7481 tmp = load_reg(s, rn);
7482 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7483 sh = (insn >> 5) & 7;
7484 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7485 goto illegal_op;
6ddbc6e4 7486 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7487 tcg_temp_free_i32(tmp2);
6ddbc6e4 7488 store_reg(s, rd, tmp);
9ee6e8bb
PB
7489 break;
7490 case 1:
7491 if ((insn & 0x00700020) == 0) {
6c95676b 7492 /* Halfword pack. */
3670669c
PB
7493 tmp = load_reg(s, rn);
7494 tmp2 = load_reg(s, rm);
9ee6e8bb 7495 shift = (insn >> 7) & 0x1f;
3670669c
PB
7496 if (insn & (1 << 6)) {
7497 /* pkhtb */
22478e79
AZ
7498 if (shift == 0)
7499 shift = 31;
7500 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7501 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7502 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7503 } else {
7504 /* pkhbt */
22478e79
AZ
7505 if (shift)
7506 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7507 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7508 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7509 }
7510 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7511 tcg_temp_free_i32(tmp2);
3670669c 7512 store_reg(s, rd, tmp);
9ee6e8bb
PB
7513 } else if ((insn & 0x00200020) == 0x00200000) {
7514 /* [us]sat */
6ddbc6e4 7515 tmp = load_reg(s, rm);
9ee6e8bb
PB
7516 shift = (insn >> 7) & 0x1f;
7517 if (insn & (1 << 6)) {
7518 if (shift == 0)
7519 shift = 31;
6ddbc6e4 7520 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7521 } else {
6ddbc6e4 7522 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7523 }
7524 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7525 tmp2 = tcg_const_i32(sh);
7526 if (insn & (1 << 22))
9ef39277 7527 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7528 else
9ef39277 7529 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7530 tcg_temp_free_i32(tmp2);
6ddbc6e4 7531 store_reg(s, rd, tmp);
9ee6e8bb
PB
7532 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7533 /* [us]sat16 */
6ddbc6e4 7534 tmp = load_reg(s, rm);
9ee6e8bb 7535 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7536 tmp2 = tcg_const_i32(sh);
7537 if (insn & (1 << 22))
9ef39277 7538 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7539 else
9ef39277 7540 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7541 tcg_temp_free_i32(tmp2);
6ddbc6e4 7542 store_reg(s, rd, tmp);
9ee6e8bb
PB
7543 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7544 /* Select bytes. */
6ddbc6e4
PB
7545 tmp = load_reg(s, rn);
7546 tmp2 = load_reg(s, rm);
7d1b0095 7547 tmp3 = tcg_temp_new_i32();
0ecb72a5 7548 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7549 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7550 tcg_temp_free_i32(tmp3);
7551 tcg_temp_free_i32(tmp2);
6ddbc6e4 7552 store_reg(s, rd, tmp);
9ee6e8bb 7553 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7554 tmp = load_reg(s, rm);
9ee6e8bb 7555 shift = (insn >> 10) & 3;
1301f322 7556 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7557 rotate, a shift is sufficient. */
7558 if (shift != 0)
f669df27 7559 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7560 op1 = (insn >> 20) & 7;
7561 switch (op1) {
5e3f878a
PB
7562 case 0: gen_sxtb16(tmp); break;
7563 case 2: gen_sxtb(tmp); break;
7564 case 3: gen_sxth(tmp); break;
7565 case 4: gen_uxtb16(tmp); break;
7566 case 6: gen_uxtb(tmp); break;
7567 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7568 default: goto illegal_op;
7569 }
7570 if (rn != 15) {
5e3f878a 7571 tmp2 = load_reg(s, rn);
9ee6e8bb 7572 if ((op1 & 3) == 0) {
5e3f878a 7573 gen_add16(tmp, tmp2);
9ee6e8bb 7574 } else {
5e3f878a 7575 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7576 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7577 }
7578 }
6c95676b 7579 store_reg(s, rd, tmp);
9ee6e8bb
PB
7580 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7581 /* rev */
b0109805 7582 tmp = load_reg(s, rm);
9ee6e8bb
PB
7583 if (insn & (1 << 22)) {
7584 if (insn & (1 << 7)) {
b0109805 7585 gen_revsh(tmp);
9ee6e8bb
PB
7586 } else {
7587 ARCH(6T2);
b0109805 7588 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7589 }
7590 } else {
7591 if (insn & (1 << 7))
b0109805 7592 gen_rev16(tmp);
9ee6e8bb 7593 else
66896cb8 7594 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7595 }
b0109805 7596 store_reg(s, rd, tmp);
9ee6e8bb
PB
7597 } else {
7598 goto illegal_op;
7599 }
7600 break;
7601 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7602 switch ((insn >> 20) & 0x7) {
7603 case 5:
7604 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7605 /* op2 not 00x or 11x : UNDEF */
7606 goto illegal_op;
7607 }
838fa72d
AJ
7608 /* Signed multiply most significant [accumulate].
7609 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7610 tmp = load_reg(s, rm);
7611 tmp2 = load_reg(s, rs);
a7812ae4 7612 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7613
955a7dd5 7614 if (rd != 15) {
838fa72d 7615 tmp = load_reg(s, rd);
9ee6e8bb 7616 if (insn & (1 << 6)) {
838fa72d 7617 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7618 } else {
838fa72d 7619 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7620 }
7621 }
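/* Bit 5 is the R (round) bit: add 0x80000000 before discarding the
   low 32 bits below (SMMULR/SMMLAR/SMMLSR). */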
838fa72d
AJ
7622 if (insn & (1 << 5)) {
7623 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7624 }
7625 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7626 tmp = tcg_temp_new_i32();
838fa72d
AJ
7627 tcg_gen_trunc_i64_i32(tmp, tmp64);
7628 tcg_temp_free_i64(tmp64);
955a7dd5 7629 store_reg(s, rn, tmp);
41e9564d
PM
7630 break;
7631 case 0:
7632 case 4:
7633 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7634 if (insn & (1 << 7)) {
7635 goto illegal_op;
7636 }
7637 tmp = load_reg(s, rm);
7638 tmp2 = load_reg(s, rs);
9ee6e8bb 7639 if (insn & (1 << 5))
5e3f878a
PB
7640 gen_swap_half(tmp2);
7641 gen_smul_dual(tmp, tmp2);
5e3f878a 7642 if (insn & (1 << 6)) {
e1d177b9 7643 /* This subtraction cannot overflow. */
5e3f878a
PB
7644 tcg_gen_sub_i32(tmp, tmp, tmp2);
7645 } else {
e1d177b9
PM
7646 /* This addition cannot overflow 32 bits;
7647 * however it may overflow considered as a signed
7648 * operation, in which case we must set the Q flag.
7649 */
9ef39277 7650 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7651 }
7d1b0095 7652 tcg_temp_free_i32(tmp2);
9ee6e8bb 7653 if (insn & (1 << 22)) {
5e3f878a 7654 /* smlald, smlsld */
a7812ae4
PB
7655 tmp64 = tcg_temp_new_i64();
7656 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7657 tcg_temp_free_i32(tmp);
a7812ae4
PB
7658 gen_addq(s, tmp64, rd, rn);
7659 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7660 tcg_temp_free_i64(tmp64);
9ee6e8bb 7661 } else {
5e3f878a 7662 /* smuad, smusd, smlad, smlsd */
22478e79 7663 if (rd != 15)
9ee6e8bb 7664 {
22478e79 7665 tmp2 = load_reg(s, rd);
9ef39277 7666 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7667 tcg_temp_free_i32(tmp2);
9ee6e8bb 7668 }
22478e79 7669 store_reg(s, rn, tmp);
9ee6e8bb 7670 }
41e9564d 7671 break;
b8b8ea05
PM
7672 case 1:
7673 case 3:
7674 /* SDIV, UDIV */
7675 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7676 goto illegal_op;
7677 }
7678 if (((insn >> 5) & 7) || (rd != 15)) {
7679 goto illegal_op;
7680 }
7681 tmp = load_reg(s, rm);
7682 tmp2 = load_reg(s, rs);
7683 if (insn & (1 << 21)) {
7684 gen_helper_udiv(tmp, tmp, tmp2);
7685 } else {
7686 gen_helper_sdiv(tmp, tmp, tmp2);
7687 }
7688 tcg_temp_free_i32(tmp2);
7689 store_reg(s, rn, tmp);
7690 break;
41e9564d
PM
7691 default:
7692 goto illegal_op;
9ee6e8bb
PB
7693 }
7694 break;
7695 case 3:
7696 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7697 switch (op1) {
7698 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7699 ARCH(6);
7700 tmp = load_reg(s, rm);
7701 tmp2 = load_reg(s, rs);
7702 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7703 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7704 if (rd != 15) {
7705 tmp2 = load_reg(s, rd);
6ddbc6e4 7706 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7707 tcg_temp_free_i32(tmp2);
9ee6e8bb 7708 }
ded9d295 7709 store_reg(s, rn, tmp);
9ee6e8bb
PB
7710 break;
7711 case 0x20: case 0x24: case 0x28: case 0x2c:
7712 /* Bitfield insert/clear. */
7713 ARCH(6T2);
7714 shift = (insn >> 7) & 0x1f;
7715 i = (insn >> 16) & 0x1f;
7716 i = i + 1 - shift;
7717 if (rm == 15) {
7d1b0095 7718 tmp = tcg_temp_new_i32();
5e3f878a 7719 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7720 } else {
5e3f878a 7721 tmp = load_reg(s, rm);
9ee6e8bb
PB
7722 }
7723 if (i != 32) {
5e3f878a 7724 tmp2 = load_reg(s, rd);
d593c48e 7725 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7726 tcg_temp_free_i32(tmp2);
9ee6e8bb 7727 }
5e3f878a 7728 store_reg(s, rd, tmp);
9ee6e8bb
PB
7729 break;
7730 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7731 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7732 ARCH(6T2);
5e3f878a 7733 tmp = load_reg(s, rm);
9ee6e8bb
PB
7734 shift = (insn >> 7) & 0x1f;
7735 i = ((insn >> 16) & 0x1f) + 1;
7736 if (shift + i > 32)
7737 goto illegal_op;
7738 if (i < 32) {
7739 if (op1 & 0x20) {
5e3f878a 7740 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7741 } else {
5e3f878a 7742 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7743 }
7744 }
5e3f878a 7745 store_reg(s, rd, tmp);
9ee6e8bb
PB
7746 break;
7747 default:
7748 goto illegal_op;
7749 }
7750 break;
7751 }
7752 break;
7753 }
7754 do_ldst:
7755 /* Check for undefined extension instructions
7756 * per the ARM Bible, i.e.:
7757 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7758 */
7759 sh = (0xf << 20) | (0xf << 4);
7760 if (op1 == 0x7 && ((insn & sh) == sh))
7761 {
7762 goto illegal_op;
7763 }
7764 /* load/store byte/word */
7765 rn = (insn >> 16) & 0xf;
7766 rd = (insn >> 12) & 0xf;
b0109805 7767 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7768 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7769 if (insn & (1 << 24))
b0109805 7770 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7771 if (insn & (1 << 20)) {
7772 /* load */
5a839c0d 7773 tmp = tcg_temp_new_i32();
9ee6e8bb 7774 if (insn & (1 << 22)) {
5a839c0d 7775 tcg_gen_qemu_ld8u(tmp, tmp2, i);
9ee6e8bb 7776 } else {
5a839c0d 7777 tcg_gen_qemu_ld32u(tmp, tmp2, i);
9ee6e8bb 7778 }
9ee6e8bb
PB
7779 } else {
7780 /* store */
b0109805 7781 tmp = load_reg(s, rd);
5a839c0d
PM
7782 if (insn & (1 << 22)) {
7783 tcg_gen_qemu_st8(tmp, tmp2, i);
7784 } else {
7785 tcg_gen_qemu_st32(tmp, tmp2, i);
7786 }
7787 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7788 }
7789 if (!(insn & (1 << 24))) {
b0109805
PB
7790 gen_add_data_offset(s, insn, tmp2);
7791 store_reg(s, rn, tmp2);
7792 } else if (insn & (1 << 21)) {
7793 store_reg(s, rn, tmp2);
7794 } else {
7d1b0095 7795 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7796 }
7797 if (insn & (1 << 20)) {
7798 /* Complete the load. */
be5e7a76 7799 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7800 }
7801 break;
7802 case 0x08:
7803 case 0x09:
7804 {
7805 int j, n, user, loaded_base;
39d5492a 7806 TCGv_i32 loaded_var;
9ee6e8bb
PB
7807 /* load/store multiple words */
7808 /* XXX: store correct base if write back */
7809 user = 0;
7810 if (insn & (1 << 22)) {
7811 if (IS_USER(s))
7812 goto illegal_op; /* only usable in supervisor mode */
7813
7814 if ((insn & (1 << 15)) == 0)
7815 user = 1;
7816 }
7817 rn = (insn >> 16) & 0xf;
b0109805 7818 addr = load_reg(s, rn);
9ee6e8bb
PB
7819
7820 /* compute total size */
7821 loaded_base = 0;
39d5492a 7822 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
7823 n = 0;
7824 for(i=0;i<16;i++) {
7825 if (insn & (1 << i))
7826 n++;
7827 }
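/* n is now the number of registers in the list, so the transfer covers
   n * 4 bytes; the decrementing modes bias the base downwards here so
   that the transfer loop below can always walk addresses upwards. */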
7828 /* XXX: test invalid n == 0 case ? */
7829 if (insn & (1 << 23)) {
7830 if (insn & (1 << 24)) {
7831 /* pre increment */
b0109805 7832 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7833 } else {
7834 /* post increment */
7835 }
7836 } else {
7837 if (insn & (1 << 24)) {
7838 /* pre decrement */
b0109805 7839 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7840 } else {
7841 /* post decrement */
7842 if (n != 1)
b0109805 7843 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7844 }
7845 }
7846 j = 0;
7847 for(i=0;i<16;i++) {
7848 if (insn & (1 << i)) {
7849 if (insn & (1 << 20)) {
7850 /* load */
5a839c0d
PM
7851 tmp = tcg_temp_new_i32();
7852 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
be5e7a76 7853 if (user) {
b75263d6 7854 tmp2 = tcg_const_i32(i);
1ce94f81 7855 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7856 tcg_temp_free_i32(tmp2);
7d1b0095 7857 tcg_temp_free_i32(tmp);
9ee6e8bb 7858 } else if (i == rn) {
b0109805 7859 loaded_var = tmp;
9ee6e8bb
PB
7860 loaded_base = 1;
7861 } else {
be5e7a76 7862 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7863 }
7864 } else {
7865 /* store */
7866 if (i == 15) {
7867 /* special case: r15 = PC + 8 */
7868 val = (long)s->pc + 4;
7d1b0095 7869 tmp = tcg_temp_new_i32();
b0109805 7870 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7871 } else if (user) {
7d1b0095 7872 tmp = tcg_temp_new_i32();
b75263d6 7873 tmp2 = tcg_const_i32(i);
9ef39277 7874 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7875 tcg_temp_free_i32(tmp2);
9ee6e8bb 7876 } else {
b0109805 7877 tmp = load_reg(s, i);
9ee6e8bb 7878 }
5a839c0d
PM
7879 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
7880 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7881 }
7882 j++;
7883 /* no need to add after the last transfer */
7884 if (j != n)
b0109805 7885 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7886 }
7887 }
7888 if (insn & (1 << 21)) {
7889 /* write back */
7890 if (insn & (1 << 23)) {
7891 if (insn & (1 << 24)) {
7892 /* pre increment */
7893 } else {
7894 /* post increment */
b0109805 7895 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7896 }
7897 } else {
7898 if (insn & (1 << 24)) {
7899 /* pre decrement */
7900 if (n != 1)
b0109805 7901 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7902 } else {
7903 /* post decrement */
b0109805 7904 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7905 }
7906 }
b0109805
PB
7907 store_reg(s, rn, addr);
7908 } else {
7d1b0095 7909 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7910 }
7911 if (loaded_base) {
b0109805 7912 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7913 }
7914 if ((insn & (1 << 22)) && !user) {
7915 /* Restore CPSR from SPSR. */
d9ba4830
PB
7916 tmp = load_cpu_field(spsr);
7917 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7918 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7919 s->is_jmp = DISAS_UPDATE;
7920 }
7921 }
7922 break;
7923 case 0xa:
7924 case 0xb:
7925 {
7926 int32_t offset;
7927
7928 /* branch (and link) */
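/* The 24-bit immediate is sign-extended by the shift pair below,
   scaled to bytes (<< 2) and taken relative to the architectural PC,
   i.e. this instruction's address + 8 (s->pc is already 4 past it). */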
7929 val = (int32_t)s->pc;
7930 if (insn & (1 << 24)) {
7d1b0095 7931 tmp = tcg_temp_new_i32();
5e3f878a
PB
7932 tcg_gen_movi_i32(tmp, val);
7933 store_reg(s, 14, tmp);
9ee6e8bb
PB
7934 }
7935 offset = (((int32_t)insn << 8) >> 8);
7936 val += (offset << 2) + 4;
7937 gen_jmp(s, val);
7938 }
7939 break;
7940 case 0xc:
7941 case 0xd:
7942 case 0xe:
7943 /* Coprocessor. */
7944 if (disas_coproc_insn(env, s, insn))
7945 goto illegal_op;
7946 break;
7947 case 0xf:
7948 /* swi */
5e3f878a 7949 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7950 s->is_jmp = DISAS_SWI;
7951 break;
7952 default:
7953 illegal_op:
bc4a0de0 7954 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7955 break;
7956 }
7957 }
7958}
7959
7960/* Return true if this is a Thumb-2 logical op. */
7961static int
7962thumb2_logic_op(int op)
7963{
7964 return (op < 8);
7965}
7966
7967/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7968 then set condition code flags based on the result of the operation.
7969 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7970 to the high bit of T1.
7971 Returns zero if the opcode is valid. */
7972
7973static int
39d5492a
PM
7974gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
7975 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
7976{
7977 int logic_cc;
7978
7979 logic_cc = 0;
7980 switch (op) {
7981 case 0: /* and */
396e467c 7982 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7983 logic_cc = conds;
7984 break;
7985 case 1: /* bic */
f669df27 7986 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7987 logic_cc = conds;
7988 break;
7989 case 2: /* orr */
396e467c 7990 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7991 logic_cc = conds;
7992 break;
7993 case 3: /* orn */
29501f1b 7994 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7995 logic_cc = conds;
7996 break;
7997 case 4: /* eor */
396e467c 7998 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7999 logic_cc = conds;
8000 break;
8001 case 8: /* add */
8002 if (conds)
72485ec4 8003 gen_add_CC(t0, t0, t1);
9ee6e8bb 8004 else
396e467c 8005 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8006 break;
8007 case 10: /* adc */
8008 if (conds)
49b4c31e 8009 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8010 else
396e467c 8011 gen_adc(t0, t1);
9ee6e8bb
PB
8012 break;
8013 case 11: /* sbc */
2de68a49
RH
8014 if (conds) {
8015 gen_sbc_CC(t0, t0, t1);
8016 } else {
396e467c 8017 gen_sub_carry(t0, t0, t1);
2de68a49 8018 }
9ee6e8bb
PB
8019 break;
8020 case 13: /* sub */
8021 if (conds)
72485ec4 8022 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8023 else
396e467c 8024 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8025 break;
8026 case 14: /* rsb */
8027 if (conds)
72485ec4 8028 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8029 else
396e467c 8030 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8031 break;
8032 default: /* 5, 6, 7, 9, 12, 15. */
8033 return 1;
8034 }
8035 if (logic_cc) {
396e467c 8036 gen_logic_CC(t0);
9ee6e8bb 8037 if (shifter_out)
396e467c 8038 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8039 }
8040 return 0;
8041}
8042
8043/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8044 is not legal. */
0ecb72a5 8045static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8046{
b0109805 8047 uint32_t insn, imm, shift, offset;
9ee6e8bb 8048 uint32_t rd, rn, rm, rs;
39d5492a
PM
8049 TCGv_i32 tmp;
8050 TCGv_i32 tmp2;
8051 TCGv_i32 tmp3;
8052 TCGv_i32 addr;
a7812ae4 8053 TCGv_i64 tmp64;
9ee6e8bb
PB
8054 int op;
8055 int shiftop;
8056 int conds;
8057 int logic_cc;
8058
8059 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8060 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8061 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8062 16-bit instructions to get correct prefetch abort behavior. */
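/* In that case the first halfword has already added the sign-extended
   upper offset bits to the PC and left that intermediate value in r14
   (see the page-crossing case below); the second halfword adds the low
   bits and branches. */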
8063 insn = insn_hw1;
8064 if ((insn & (1 << 12)) == 0) {
be5e7a76 8065 ARCH(5);
9ee6e8bb
PB
8066 /* Second half of blx. */
8067 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8068 tmp = load_reg(s, 14);
8069 tcg_gen_addi_i32(tmp, tmp, offset);
8070 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8071
7d1b0095 8072 tmp2 = tcg_temp_new_i32();
b0109805 8073 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8074 store_reg(s, 14, tmp2);
8075 gen_bx(s, tmp);
9ee6e8bb
PB
8076 return 0;
8077 }
8078 if (insn & (1 << 11)) {
8079 /* Second half of bl. */
8080 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8081 tmp = load_reg(s, 14);
6a0d8a1d 8082 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8083
7d1b0095 8084 tmp2 = tcg_temp_new_i32();
b0109805 8085 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8086 store_reg(s, 14, tmp2);
8087 gen_bx(s, tmp);
9ee6e8bb
PB
8088 return 0;
8089 }
8090 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8091 /* Instruction spans a page boundary. Implement it as two
8092 16-bit instructions in case the second half causes a
8093 prefetch abort. */
8094 offset = ((int32_t)insn << 21) >> 9;
396e467c 8095 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8096 return 0;
8097 }
8098 /* Fall through to 32-bit decode. */
8099 }
8100
d31dd73e 8101 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8102 s->pc += 2;
8103 insn |= (uint32_t)insn_hw1 << 16;
8104
8105 if ((insn & 0xf800e800) != 0xf000e800) {
8106 ARCH(6T2);
8107 }
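/* Only the original BL/BLX halfword pairs (hw1 is 11110xxxxxxxxxxx and
   hw2 is 111x1xxxxxxxxxxx) predate Thumb-2; every other 32-bit
   encoding requires it. */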
8108
8109 rn = (insn >> 16) & 0xf;
8110 rs = (insn >> 12) & 0xf;
8111 rd = (insn >> 8) & 0xf;
8112 rm = insn & 0xf;
8113 switch ((insn >> 25) & 0xf) {
8114 case 0: case 1: case 2: case 3:
8115 /* 16-bit instructions. Should never happen. */
8116 abort();
8117 case 4:
8118 if (insn & (1 << 22)) {
8119 /* Other load/store, table branch. */
8120 if (insn & 0x01200000) {
8121 /* Load/store doubleword. */
8122 if (rn == 15) {
7d1b0095 8123 addr = tcg_temp_new_i32();
b0109805 8124 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8125 } else {
b0109805 8126 addr = load_reg(s, rn);
9ee6e8bb
PB
8127 }
8128 offset = (insn & 0xff) * 4;
8129 if ((insn & (1 << 23)) == 0)
8130 offset = -offset;
8131 if (insn & (1 << 24)) {
b0109805 8132 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8133 offset = 0;
8134 }
8135 if (insn & (1 << 20)) {
8136 /* ldrd */
b0109805
PB
8137 tmp = gen_ld32(addr, IS_USER(s));
8138 store_reg(s, rs, tmp);
8139 tcg_gen_addi_i32(addr, addr, 4);
8140 tmp = gen_ld32(addr, IS_USER(s));
8141 store_reg(s, rd, tmp);
9ee6e8bb
PB
8142 } else {
8143 /* strd */
b0109805
PB
8144 tmp = load_reg(s, rs);
8145 gen_st32(tmp, addr, IS_USER(s));
8146 tcg_gen_addi_i32(addr, addr, 4);
8147 tmp = load_reg(s, rd);
8148 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8149 }
8150 if (insn & (1 << 21)) {
8151 /* Base writeback. */
8152 if (rn == 15)
8153 goto illegal_op;
b0109805
PB
8154 tcg_gen_addi_i32(addr, addr, offset - 4);
8155 store_reg(s, rn, addr);
8156 } else {
7d1b0095 8157 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8158 }
8159 } else if ((insn & (1 << 23)) == 0) {
8160 /* Load/store exclusive word. */
39d5492a 8161 addr = tcg_temp_local_new_i32();
98a46317 8162 load_reg_var(s, addr, rn);
426f5abc 8163 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8164 if (insn & (1 << 20)) {
426f5abc 8165 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8166 } else {
426f5abc 8167 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8168 }
39d5492a 8169 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8170 } else if ((insn & (1 << 6)) == 0) {
8171 /* Table Branch. */
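/* TBB/TBH: load a byte or halfword table entry from rn + rm (rm added
   twice for tbh), double it (entries count halfwords) and add it to
   the PC to form the branch target. */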
8172 if (rn == 15) {
7d1b0095 8173 addr = tcg_temp_new_i32();
b0109805 8174 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8175 } else {
b0109805 8176 addr = load_reg(s, rn);
9ee6e8bb 8177 }
b26eefb6 8178 tmp = load_reg(s, rm);
b0109805 8179 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8180 if (insn & (1 << 4)) {
8181 /* tbh */
b0109805 8182 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8183 tcg_temp_free_i32(tmp);
b0109805 8184 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8185 } else { /* tbb */
7d1b0095 8186 tcg_temp_free_i32(tmp);
b0109805 8187 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8188 }
7d1b0095 8189 tcg_temp_free_i32(addr);
b0109805
PB
8190 tcg_gen_shli_i32(tmp, tmp, 1);
8191 tcg_gen_addi_i32(tmp, tmp, s->pc);
8192 store_reg(s, 15, tmp);
9ee6e8bb
PB
8193 } else {
8194 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8195 ARCH(7);
9ee6e8bb 8196 op = (insn >> 4) & 0x3;
426f5abc
PB
8197 if (op == 2) {
8198 goto illegal_op;
8199 }
39d5492a 8200 addr = tcg_temp_local_new_i32();
98a46317 8201 load_reg_var(s, addr, rn);
9ee6e8bb 8202 if (insn & (1 << 20)) {
426f5abc 8203 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8204 } else {
426f5abc 8205 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8206 }
39d5492a 8207 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8208 }
8209 } else {
8210 /* Load/store multiple, RFE, SRS. */
8211 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8212 /* RFE, SRS: not available in user mode or on M profile */
8213 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8214 goto illegal_op;
00115976 8215 }
9ee6e8bb
PB
8216 if (insn & (1 << 20)) {
8217 /* rfe */
b0109805
PB
8218 addr = load_reg(s, rn);
8219 if ((insn & (1 << 24)) == 0)
8220 tcg_gen_addi_i32(addr, addr, -8);
8221 /* Load PC into tmp and CPSR into tmp2. */
8222 tmp = gen_ld32(addr, 0);
8223 tcg_gen_addi_i32(addr, addr, 4);
8224 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8225 if (insn & (1 << 21)) {
8226 /* Base writeback. */
b0109805
PB
8227 if (insn & (1 << 24)) {
8228 tcg_gen_addi_i32(addr, addr, 4);
8229 } else {
8230 tcg_gen_addi_i32(addr, addr, -4);
8231 }
8232 store_reg(s, rn, addr);
8233 } else {
7d1b0095 8234 tcg_temp_free_i32(addr);
9ee6e8bb 8235 }
b0109805 8236 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8237 } else {
8238 /* srs */
81465888
PM
8239 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8240 insn & (1 << 21));
9ee6e8bb
PB
8241 }
8242 } else {
5856d44e 8243 int i, loaded_base = 0;
39d5492a 8244 TCGv_i32 loaded_var;
9ee6e8bb 8245 /* Load/store multiple. */
b0109805 8246 addr = load_reg(s, rn);
9ee6e8bb
PB
8247 offset = 0;
8248 for (i = 0; i < 16; i++) {
8249 if (insn & (1 << i))
8250 offset += 4;
8251 }
8252 if (insn & (1 << 24)) {
b0109805 8253 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8254 }
8255
39d5492a 8256 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8257 for (i = 0; i < 16; i++) {
8258 if ((insn & (1 << i)) == 0)
8259 continue;
8260 if (insn & (1 << 20)) {
8261 /* Load. */
b0109805 8262 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8263 if (i == 15) {
b0109805 8264 gen_bx(s, tmp);
5856d44e
YO
8265 } else if (i == rn) {
8266 loaded_var = tmp;
8267 loaded_base = 1;
9ee6e8bb 8268 } else {
b0109805 8269 store_reg(s, i, tmp);
9ee6e8bb
PB
8270 }
8271 } else {
8272 /* Store. */
b0109805
PB
8273 tmp = load_reg(s, i);
8274 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8275 }
b0109805 8276 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8277 }
5856d44e
YO
8278 if (loaded_base) {
8279 store_reg(s, rn, loaded_var);
8280 }
9ee6e8bb
PB
8281 if (insn & (1 << 21)) {
8282 /* Base register writeback. */
8283 if (insn & (1 << 24)) {
b0109805 8284 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8285 }
8286 /* Fault if writeback register is in register list. */
8287 if (insn & (1 << rn))
8288 goto illegal_op;
b0109805
PB
8289 store_reg(s, rn, addr);
8290 } else {
7d1b0095 8291 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8292 }
8293 }
8294 }
8295 break;
2af9ab77
JB
8296 case 5:
8297
9ee6e8bb 8298 op = (insn >> 21) & 0xf;
2af9ab77
JB
8299 if (op == 6) {
8300 /* Halfword pack. */
8301 tmp = load_reg(s, rn);
8302 tmp2 = load_reg(s, rm);
8303 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8304 if (insn & (1 << 5)) {
8305 /* pkhtb */
8306 if (shift == 0)
8307 shift = 31;
8308 tcg_gen_sari_i32(tmp2, tmp2, shift);
8309 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8310 tcg_gen_ext16u_i32(tmp2, tmp2);
8311 } else {
8312 /* pkhbt */
8313 if (shift)
8314 tcg_gen_shli_i32(tmp2, tmp2, shift);
8315 tcg_gen_ext16u_i32(tmp, tmp);
8316 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8317 }
8318 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8319 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8320 store_reg(s, rd, tmp);
8321 } else {
2af9ab77
JB
8322 /* Data processing register constant shift. */
8323 if (rn == 15) {
7d1b0095 8324 tmp = tcg_temp_new_i32();
2af9ab77
JB
8325 tcg_gen_movi_i32(tmp, 0);
8326 } else {
8327 tmp = load_reg(s, rn);
8328 }
8329 tmp2 = load_reg(s, rm);
8330
8331 shiftop = (insn >> 4) & 3;
8332 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8333 conds = (insn & (1 << 20)) != 0;
8334 logic_cc = (conds && thumb2_logic_op(op));
8335 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8336 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8337 goto illegal_op;
7d1b0095 8338 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8339 if (rd != 15) {
8340 store_reg(s, rd, tmp);
8341 } else {
7d1b0095 8342 tcg_temp_free_i32(tmp);
2af9ab77 8343 }
3174f8e9 8344 }
9ee6e8bb
PB
8345 break;
8346 case 13: /* Misc data processing. */
8347 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8348 if (op < 4 && (insn & 0xf000) != 0xf000)
8349 goto illegal_op;
8350 switch (op) {
8351 case 0: /* Register controlled shift. */
8984bd2e
PB
8352 tmp = load_reg(s, rn);
8353 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8354 if ((insn & 0x70) != 0)
8355 goto illegal_op;
8356 op = (insn >> 21) & 3;
8984bd2e
PB
8357 logic_cc = (insn & (1 << 20)) != 0;
8358 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8359 if (logic_cc)
8360 gen_logic_CC(tmp);
21aeb343 8361 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8362 break;
8363 case 1: /* Sign/zero extend. */
5e3f878a 8364 tmp = load_reg(s, rm);
9ee6e8bb 8365 shift = (insn >> 4) & 3;
1301f322 8366 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8367 rotate, a shift is sufficient. */
8368 if (shift != 0)
f669df27 8369 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8370 op = (insn >> 20) & 7;
8371 switch (op) {
5e3f878a
PB
8372 case 0: gen_sxth(tmp); break;
8373 case 1: gen_uxth(tmp); break;
8374 case 2: gen_sxtb16(tmp); break;
8375 case 3: gen_uxtb16(tmp); break;
8376 case 4: gen_sxtb(tmp); break;
8377 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8378 default: goto illegal_op;
8379 }
8380 if (rn != 15) {
5e3f878a 8381 tmp2 = load_reg(s, rn);
9ee6e8bb 8382 if ((op >> 1) == 1) {
5e3f878a 8383 gen_add16(tmp, tmp2);
9ee6e8bb 8384 } else {
5e3f878a 8385 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8386 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8387 }
8388 }
5e3f878a 8389 store_reg(s, rd, tmp);
9ee6e8bb
PB
8390 break;
8391 case 2: /* SIMD add/subtract. */
8392 op = (insn >> 20) & 7;
8393 shift = (insn >> 4) & 7;
8394 if ((op & 3) == 3 || (shift & 3) == 3)
8395 goto illegal_op;
6ddbc6e4
PB
8396 tmp = load_reg(s, rn);
8397 tmp2 = load_reg(s, rm);
8398 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8399 tcg_temp_free_i32(tmp2);
6ddbc6e4 8400 store_reg(s, rd, tmp);
9ee6e8bb
PB
8401 break;
8402 case 3: /* Other data processing. */
8403 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8404 if (op < 4) {
8405 /* Saturating add/subtract. */
d9ba4830
PB
8406 tmp = load_reg(s, rn);
8407 tmp2 = load_reg(s, rm);
9ee6e8bb 8408 if (op & 1)
9ef39277 8409 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8410 if (op & 2)
9ef39277 8411 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8412 else
9ef39277 8413 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8414 tcg_temp_free_i32(tmp2);
9ee6e8bb 8415 } else {
d9ba4830 8416 tmp = load_reg(s, rn);
9ee6e8bb
PB
8417 switch (op) {
8418 case 0x0a: /* rbit */
d9ba4830 8419 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8420 break;
8421 case 0x08: /* rev */
66896cb8 8422 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8423 break;
8424 case 0x09: /* rev16 */
d9ba4830 8425 gen_rev16(tmp);
9ee6e8bb
PB
8426 break;
8427 case 0x0b: /* revsh */
d9ba4830 8428 gen_revsh(tmp);
9ee6e8bb
PB
8429 break;
8430 case 0x10: /* sel */
d9ba4830 8431 tmp2 = load_reg(s, rm);
7d1b0095 8432 tmp3 = tcg_temp_new_i32();
0ecb72a5 8433 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8434 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8435 tcg_temp_free_i32(tmp3);
8436 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8437 break;
8438 case 0x18: /* clz */
d9ba4830 8439 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8440 break;
8441 default:
8442 goto illegal_op;
8443 }
8444 }
d9ba4830 8445 store_reg(s, rd, tmp);
9ee6e8bb
PB
8446 break;
8447 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8448 op = (insn >> 4) & 0xf;
d9ba4830
PB
8449 tmp = load_reg(s, rn);
8450 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8451 switch ((insn >> 20) & 7) {
8452 case 0: /* 32 x 32 -> 32 */
d9ba4830 8453 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8454 tcg_temp_free_i32(tmp2);
9ee6e8bb 8455 if (rs != 15) {
d9ba4830 8456 tmp2 = load_reg(s, rs);
9ee6e8bb 8457 if (op)
d9ba4830 8458 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8459 else
d9ba4830 8460 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8461 tcg_temp_free_i32(tmp2);
9ee6e8bb 8462 }
9ee6e8bb
PB
8463 break;
8464 case 1: /* 16 x 16 -> 32 */
d9ba4830 8465 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8466 tcg_temp_free_i32(tmp2);
9ee6e8bb 8467 if (rs != 15) {
d9ba4830 8468 tmp2 = load_reg(s, rs);
9ef39277 8469 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8470 tcg_temp_free_i32(tmp2);
9ee6e8bb 8471 }
9ee6e8bb
PB
8472 break;
8473 case 2: /* Dual multiply add. */
8474 case 4: /* Dual multiply subtract. */
8475 if (op)
d9ba4830
PB
8476 gen_swap_half(tmp2);
8477 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8478 if (insn & (1 << 22)) {
e1d177b9 8479 /* This subtraction cannot overflow. */
d9ba4830 8480 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8481 } else {
e1d177b9
PM
8482 /* This addition cannot overflow 32 bits;
8483 * however it may overflow considered as a signed
8484 * operation, in which case we must set the Q flag.
8485 */
9ef39277 8486 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8487 }
7d1b0095 8488 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8489 if (rs != 15)
8490 {
d9ba4830 8491 tmp2 = load_reg(s, rs);
9ef39277 8492 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8493 tcg_temp_free_i32(tmp2);
9ee6e8bb 8494 }
9ee6e8bb
PB
8495 break;
8496 case 3: /* 32 * 16 -> 32msb */
8497 if (op)
d9ba4830 8498 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8499 else
d9ba4830 8500 gen_sxth(tmp2);
a7812ae4
PB
8501 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8502 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8503 tmp = tcg_temp_new_i32();
a7812ae4 8504 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8505 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8506 if (rs != 15)
8507 {
d9ba4830 8508 tmp2 = load_reg(s, rs);
9ef39277 8509 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8510 tcg_temp_free_i32(tmp2);
9ee6e8bb 8511 }
9ee6e8bb 8512 break;
838fa72d
AJ
8513 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8514 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8515 if (rs != 15) {
838fa72d
AJ
8516 tmp = load_reg(s, rs);
8517 if (insn & (1 << 20)) {
8518 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8519 } else {
838fa72d 8520 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8521 }
2c0262af 8522 }
838fa72d
AJ
8523 if (insn & (1 << 4)) {
8524 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8525 }
8526 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8527 tmp = tcg_temp_new_i32();
838fa72d
AJ
8528 tcg_gen_trunc_i64_i32(tmp, tmp64);
8529 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8530 break;
8531 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8532 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8533 tcg_temp_free_i32(tmp2);
9ee6e8bb 8534 if (rs != 15) {
d9ba4830
PB
8535 tmp2 = load_reg(s, rs);
8536 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8537 tcg_temp_free_i32(tmp2);
5fd46862 8538 }
9ee6e8bb 8539 break;
2c0262af 8540 }
d9ba4830 8541 store_reg(s, rd, tmp);
2c0262af 8542 break;
9ee6e8bb
PB
8543 case 6: case 7: /* 64-bit multiply, Divide. */
8544 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8545 tmp = load_reg(s, rn);
8546 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8547 if ((op & 0x50) == 0x10) {
8548 /* sdiv, udiv */
47789990 8549 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8550 goto illegal_op;
47789990 8551 }
9ee6e8bb 8552 if (op & 0x20)
5e3f878a 8553 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8554 else
5e3f878a 8555 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8556 tcg_temp_free_i32(tmp2);
5e3f878a 8557 store_reg(s, rd, tmp);
9ee6e8bb
PB
8558 } else if ((op & 0xe) == 0xc) {
8559 /* Dual multiply accumulate long. */
8560 if (op & 1)
5e3f878a
PB
8561 gen_swap_half(tmp2);
8562 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8563 if (op & 0x10) {
5e3f878a 8564 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8565 } else {
5e3f878a 8566 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8567 }
7d1b0095 8568 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8569 /* BUGFIX */
8570 tmp64 = tcg_temp_new_i64();
8571 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8572 tcg_temp_free_i32(tmp);
a7812ae4
PB
8573 gen_addq(s, tmp64, rs, rd);
8574 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8575 tcg_temp_free_i64(tmp64);
2c0262af 8576 } else {
9ee6e8bb
PB
8577 if (op & 0x20) {
8578 /* Unsigned 64-bit multiply */
a7812ae4 8579 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8580 } else {
9ee6e8bb
PB
8581 if (op & 8) {
8582 /* smlalxy */
5e3f878a 8583 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8584 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8585 tmp64 = tcg_temp_new_i64();
8586 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8587 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8588 } else {
8589 /* Signed 64-bit multiply */
a7812ae4 8590 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8591 }
b5ff1b31 8592 }
9ee6e8bb
PB
8593 if (op & 4) {
8594 /* umaal */
a7812ae4
PB
8595 gen_addq_lo(s, tmp64, rs);
8596 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8597 } else if (op & 0x40) {
8598 /* 64-bit accumulate. */
a7812ae4 8599 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8600 }
a7812ae4 8601 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8602 tcg_temp_free_i64(tmp64);
5fd46862 8603 }
2c0262af 8604 break;
9ee6e8bb
PB
8605 }
8606 break;
8607 case 6: case 7: case 14: case 15:
8608 /* Coprocessor. */
8609 if (((insn >> 24) & 3) == 3) {
8610 /* Translate into the equivalent ARM encoding. */
f06053e3 8611 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8612 if (disas_neon_data_insn(env, s, insn))
8613 goto illegal_op;
8614 } else {
8615 if (insn & (1 << 28))
8616 goto illegal_op;
8617 if (disas_coproc_insn (env, s, insn))
8618 goto illegal_op;
8619 }
8620 break;
8621 case 8: case 9: case 10: case 11:
8622 if (insn & (1 << 15)) {
8623 /* Branches, misc control. */
8624 if (insn & 0x5000) {
8625 /* Unconditional branch. */
8626 /* signextend(hw1[10:0]) -> offset[:12]. */
8627 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8628 /* hw1[10:0] -> offset[11:1]. */
8629 offset |= (insn & 0x7ff) << 1;
8630 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8631 offset[24:22] already have the same value because of the
8632 sign extension above. */
8633 offset ^= ((~insn) & (1 << 13)) << 10;
8634 offset ^= ((~insn) & (1 << 11)) << 11;
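/* Net effect: offset[23] = NOT(J1 XOR S) and offset[22] = NOT(J2 XOR S),
   i.e. the I1/I2 bits of the T32 branch immediate. */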
8635
9ee6e8bb
PB
8636 if (insn & (1 << 14)) {
8637 /* Branch and link. */
3174f8e9 8638 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8639 }
3b46e624 8640
b0109805 8641 offset += s->pc;
9ee6e8bb
PB
8642 if (insn & (1 << 12)) {
8643 /* b/bl */
b0109805 8644 gen_jmp(s, offset);
9ee6e8bb
PB
8645 } else {
8646 /* blx */
b0109805 8647 offset &= ~(uint32_t)2;
be5e7a76 8648 /* thumb2 bx, no need to check */
b0109805 8649 gen_bx_im(s, offset);
2c0262af 8650 }
9ee6e8bb
PB
8651 } else if (((insn >> 23) & 7) == 7) {
8652 /* Misc control */
8653 if (insn & (1 << 13))
8654 goto illegal_op;
8655
8656 if (insn & (1 << 26)) {
8657 /* Secure monitor call (v6Z) */
8658 goto illegal_op; /* not implemented. */
2c0262af 8659 } else {
9ee6e8bb
PB
8660 op = (insn >> 20) & 7;
8661 switch (op) {
8662 case 0: /* msr cpsr. */
8663 if (IS_M(env)) {
8984bd2e
PB
8664 tmp = load_reg(s, rn);
8665 addr = tcg_const_i32(insn & 0xff);
8666 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8667 tcg_temp_free_i32(addr);
7d1b0095 8668 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8669 gen_lookup_tb(s);
8670 break;
8671 }
8672 /* fall through */
8673 case 1: /* msr spsr. */
8674 if (IS_M(env))
8675 goto illegal_op;
2fbac54b
FN
8676 tmp = load_reg(s, rn);
8677 if (gen_set_psr(s,
9ee6e8bb 8678 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8679 op == 1, tmp))
9ee6e8bb
PB
8680 goto illegal_op;
8681 break;
8682 case 2: /* cps, nop-hint. */
8683 if (((insn >> 8) & 7) == 0) {
8684 gen_nop_hint(s, insn & 0xff);
8685 }
8686 /* Implemented as NOP in user mode. */
8687 if (IS_USER(s))
8688 break;
8689 offset = 0;
8690 imm = 0;
8691 if (insn & (1 << 10)) {
8692 if (insn & (1 << 7))
8693 offset |= CPSR_A;
8694 if (insn & (1 << 6))
8695 offset |= CPSR_I;
8696 if (insn & (1 << 5))
8697 offset |= CPSR_F;
8698 if (insn & (1 << 9))
8699 imm = CPSR_A | CPSR_I | CPSR_F;
8700 }
8701 if (insn & (1 << 8)) {
8702 offset |= 0x1f;
8703 imm |= (insn & 0x1f);
8704 }
8705 if (offset) {
2fbac54b 8706 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8707 }
8708 break;
8709 case 3: /* Special control operations. */
426f5abc 8710 ARCH(7);
9ee6e8bb
PB
8711 op = (insn >> 4) & 0xf;
8712 switch (op) {
8713 case 2: /* clrex */
426f5abc 8714 gen_clrex(s);
9ee6e8bb
PB
8715 break;
8716 case 4: /* dsb */
8717 case 5: /* dmb */
8718 case 6: /* isb */
8719 /* These execute as NOPs. */
9ee6e8bb
PB
8720 break;
8721 default:
8722 goto illegal_op;
8723 }
8724 break;
8725 case 4: /* bxj */
8726 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8727 tmp = load_reg(s, rn);
8728 gen_bx(s, tmp);
9ee6e8bb
PB
8729 break;
8730 case 5: /* Exception return. */
b8b45b68
RV
8731 if (IS_USER(s)) {
8732 goto illegal_op;
8733 }
8734 if (rn != 14 || rd != 15) {
8735 goto illegal_op;
8736 }
8737 tmp = load_reg(s, rn);
8738 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8739 gen_exception_return(s, tmp);
8740 break;
9ee6e8bb 8741 case 6: /* mrs cpsr. */
7d1b0095 8742 tmp = tcg_temp_new_i32();
9ee6e8bb 8743 if (IS_M(env)) {
8984bd2e
PB
8744 addr = tcg_const_i32(insn & 0xff);
8745 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8746 tcg_temp_free_i32(addr);
9ee6e8bb 8747 } else {
9ef39277 8748 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8749 }
8984bd2e 8750 store_reg(s, rd, tmp);
9ee6e8bb
PB
8751 break;
8752 case 7: /* mrs spsr. */
8753 /* Not accessible in user mode. */
8754 if (IS_USER(s) || IS_M(env))
8755 goto illegal_op;
d9ba4830
PB
8756 tmp = load_cpu_field(spsr);
8757 store_reg(s, rd, tmp);
9ee6e8bb 8758 break;
2c0262af
FB
8759 }
8760 }
9ee6e8bb
PB
8761 } else {
8762 /* Conditional branch. */
8763 op = (insn >> 22) & 0xf;
8764 /* Generate a conditional jump to next instruction. */
8765 s->condlabel = gen_new_label();
d9ba4830 8766 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8767 s->condjmp = 1;
8768
8769 /* offset[11:1] = insn[10:0] */
8770 offset = (insn & 0x7ff) << 1;
8771 /* offset[17:12] = insn[21:16]. */
8772 offset |= (insn & 0x003f0000) >> 4;
8773 /* offset[31:20] = insn[26]. */
8774 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8775 /* offset[18] = insn[13]. */
8776 offset |= (insn & (1 << 13)) << 5;
8777 /* offset[19] = insn[11]. */
8778 offset |= (insn & (1 << 11)) << 8;
8779
8780 /* jump to the offset */
b0109805 8781 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8782 }
8783 } else {
8784 /* Data processing immediate. */
8785 if (insn & (1 << 25)) {
8786 if (insn & (1 << 24)) {
8787 if (insn & (1 << 20))
8788 goto illegal_op;
8789 /* Bitfield/Saturate. */
8790 op = (insn >> 21) & 7;
8791 imm = insn & 0x1f;
8792 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8793 if (rn == 15) {
7d1b0095 8794 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8795 tcg_gen_movi_i32(tmp, 0);
8796 } else {
8797 tmp = load_reg(s, rn);
8798 }
9ee6e8bb
PB
8799 switch (op) {
8800 case 2: /* Signed bitfield extract. */
8801 imm++;
8802 if (shift + imm > 32)
8803 goto illegal_op;
8804 if (imm < 32)
6ddbc6e4 8805 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8806 break;
8807 case 6: /* Unsigned bitfield extract. */
8808 imm++;
8809 if (shift + imm > 32)
8810 goto illegal_op;
8811 if (imm < 32)
6ddbc6e4 8812 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8813 break;
8814 case 3: /* Bitfield insert/clear. */
8815 if (imm < shift)
8816 goto illegal_op;
8817 imm = imm + 1 - shift;
8818 if (imm != 32) {
6ddbc6e4 8819 tmp2 = load_reg(s, rd);
d593c48e 8820 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8821 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8822 }
8823 break;
8824 case 7:
8825 goto illegal_op;
8826 default: /* Saturate. */
9ee6e8bb
PB
8827 if (shift) {
8828 if (op & 1)
6ddbc6e4 8829 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8830 else
6ddbc6e4 8831 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8832 }
6ddbc6e4 8833 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8834 if (op & 4) {
8835 /* Unsigned. */
9ee6e8bb 8836 if ((op & 1) && shift == 0)
9ef39277 8837 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8838 else
9ef39277 8839 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8840 } else {
9ee6e8bb 8841 /* Signed. */
9ee6e8bb 8842 if ((op & 1) && shift == 0)
9ef39277 8843 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8844 else
9ef39277 8845 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8846 }
b75263d6 8847 tcg_temp_free_i32(tmp2);
9ee6e8bb 8848 break;
2c0262af 8849 }
6ddbc6e4 8850 store_reg(s, rd, tmp);
9ee6e8bb
PB
8851 } else {
8852 imm = ((insn & 0x04000000) >> 15)
8853 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8854 if (insn & (1 << 22)) {
8855 /* 16-bit immediate. */
8856 imm |= (insn >> 4) & 0xf000;
8857 if (insn & (1 << 23)) {
8858 /* movt */
5e3f878a 8859 tmp = load_reg(s, rd);
86831435 8860 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8861 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8862 } else {
9ee6e8bb 8863 /* movw */
7d1b0095 8864 tmp = tcg_temp_new_i32();
5e3f878a 8865 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8866 }
8867 } else {
9ee6e8bb
PB
8868 /* Add/sub 12-bit immediate. */
8869 if (rn == 15) {
b0109805 8870 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8871 if (insn & (1 << 23))
b0109805 8872 offset -= imm;
9ee6e8bb 8873 else
b0109805 8874 offset += imm;
7d1b0095 8875 tmp = tcg_temp_new_i32();
5e3f878a 8876 tcg_gen_movi_i32(tmp, offset);
2c0262af 8877 } else {
5e3f878a 8878 tmp = load_reg(s, rn);
9ee6e8bb 8879 if (insn & (1 << 23))
5e3f878a 8880 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8881 else
5e3f878a 8882 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8883 }
9ee6e8bb 8884 }
5e3f878a 8885 store_reg(s, rd, tmp);
191abaa2 8886 }
9ee6e8bb
PB
8887 } else {
8888 int shifter_out = 0;
8889 /* modified 12-bit immediate. */
8890 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8891 imm = (insn & 0xff);
8892 switch (shift) {
8893 case 0: /* XY */
8894 /* Nothing to do. */
8895 break;
8896 case 1: /* 00XY00XY */
8897 imm |= imm << 16;
8898 break;
8899 case 2: /* XY00XY00 */
8900 imm |= imm << 16;
8901 imm <<= 8;
8902 break;
8903 case 3: /* XYXYXYXY */
8904 imm |= imm << 16;
8905 imm |= imm << 8;
8906 break;
8907 default: /* Rotated constant. */
8908 shift = (shift << 1) | (imm >> 7);
8909 imm |= 0x80;
8910 imm = imm << (32 - shift);
8911 shifter_out = 1;
8912 break;
b5ff1b31 8913 }
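/* The rotated form places the 8-bit value (with bit 7 forced set)
   rotated right by 'shift' positions; shifter_out tells
   gen_thumb2_data_op() to copy bit 31 into the carry for logical ops. */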
7d1b0095 8914 tmp2 = tcg_temp_new_i32();
3174f8e9 8915 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8916 rn = (insn >> 16) & 0xf;
3174f8e9 8917 if (rn == 15) {
7d1b0095 8918 tmp = tcg_temp_new_i32();
3174f8e9
FN
8919 tcg_gen_movi_i32(tmp, 0);
8920 } else {
8921 tmp = load_reg(s, rn);
8922 }
9ee6e8bb
PB
8923 op = (insn >> 21) & 0xf;
8924 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8925 shifter_out, tmp, tmp2))
9ee6e8bb 8926 goto illegal_op;
7d1b0095 8927 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8928 rd = (insn >> 8) & 0xf;
8929 if (rd != 15) {
3174f8e9
FN
8930 store_reg(s, rd, tmp);
8931 } else {
7d1b0095 8932 tcg_temp_free_i32(tmp);
2c0262af 8933 }
2c0262af 8934 }
9ee6e8bb
PB
8935 }
8936 break;
8937 case 12: /* Load/store single data item. */
8938 {
8939 int postinc = 0;
8940 int writeback = 0;
b0109805 8941 int user;
9ee6e8bb
PB
8942 if ((insn & 0x01100000) == 0x01000000) {
8943 if (disas_neon_ls_insn(env, s, insn))
c1713132 8944 goto illegal_op;
9ee6e8bb
PB
8945 break;
8946 }
a2fdc890
PM
8947 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8948 if (rs == 15) {
8949 if (!(insn & (1 << 20))) {
8950 goto illegal_op;
8951 }
8952 if (op != 2) {
8953 /* Byte or halfword load space with dest == r15 : memory hints.
8954 * Catch them early so we don't emit pointless addressing code.
8955 * This space is a mix of:
8956 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8957 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8958 * cores)
8959 * unallocated hints, which must be treated as NOPs
8960 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8961 * which is easiest for the decoding logic
8962 * Some space which must UNDEF
8963 */
8964 int op1 = (insn >> 23) & 3;
8965 int op2 = (insn >> 6) & 0x3f;
8966 if (op & 2) {
8967 goto illegal_op;
8968 }
8969 if (rn == 15) {
02afbf64
PM
8970 /* UNPREDICTABLE, unallocated hint or
8971 * PLD/PLDW/PLI (literal)
8972 */
a2fdc890
PM
8973 return 0;
8974 }
8975 if (op1 & 1) {
02afbf64 8976 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8977 }
8978 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8979 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8980 }
8981 /* UNDEF space, or an UNPREDICTABLE */
8982 return 1;
8983 }
8984 }
b0109805 8985 user = IS_USER(s);
9ee6e8bb 8986 if (rn == 15) {
7d1b0095 8987 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8988 /* PC relative. */
8989 /* s->pc has already been incremented by 4. */
8990 imm = s->pc & 0xfffffffc;
8991 if (insn & (1 << 23))
8992 imm += insn & 0xfff;
8993 else
8994 imm -= insn & 0xfff;
b0109805 8995 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8996 } else {
b0109805 8997 addr = load_reg(s, rn);
9ee6e8bb
PB
8998 if (insn & (1 << 23)) {
8999 /* Positive offset. */
9000 imm = insn & 0xfff;
b0109805 9001 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9002 } else {
9ee6e8bb 9003 imm = insn & 0xff;
2a0308c5
PM
9004 switch ((insn >> 8) & 0xf) {
9005 case 0x0: /* Shifted Register. */
9ee6e8bb 9006 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9007 if (shift > 3) {
9008 tcg_temp_free_i32(addr);
18c9b560 9009 goto illegal_op;
2a0308c5 9010 }
b26eefb6 9011 tmp = load_reg(s, rm);
9ee6e8bb 9012 if (shift)
b26eefb6 9013 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9014 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9015 tcg_temp_free_i32(tmp);
9ee6e8bb 9016 break;
2a0308c5 9017 case 0xc: /* Negative offset. */
b0109805 9018 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9019 break;
2a0308c5 9020 case 0xe: /* User privilege. */
b0109805
PB
9021 tcg_gen_addi_i32(addr, addr, imm);
9022 user = 1;
9ee6e8bb 9023 break;
2a0308c5 9024 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9025 imm = -imm;
9026 /* Fall through. */
2a0308c5 9027 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9028 postinc = 1;
9029 writeback = 1;
9030 break;
2a0308c5 9031 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9032 imm = -imm;
9033 /* Fall through. */
2a0308c5 9034 case 0xf: /* Pre-increment. */
b0109805 9035 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9036 writeback = 1;
9037 break;
9038 default:
2a0308c5 9039 tcg_temp_free_i32(addr);
b7bcbe95 9040 goto illegal_op;
9ee6e8bb
PB
9041 }
9042 }
9043 }
9ee6e8bb
PB
9044 if (insn & (1 << 20)) {
9045 /* Load. */
5a839c0d 9046 tmp = tcg_temp_new_i32();
a2fdc890 9047 switch (op) {
5a839c0d
PM
9048 case 0:
9049 tcg_gen_qemu_ld8u(tmp, addr, user);
9050 break;
9051 case 4:
9052 tcg_gen_qemu_ld8s(tmp, addr, user);
9053 break;
9054 case 1:
9055 tcg_gen_qemu_ld16u(tmp, addr, user);
9056 break;
9057 case 5:
9058 tcg_gen_qemu_ld16s(tmp, addr, user);
9059 break;
9060 case 2:
9061 tcg_gen_qemu_ld32u(tmp, addr, user);
9062 break;
2a0308c5 9063 default:
5a839c0d 9064 tcg_temp_free_i32(tmp);
2a0308c5
PM
9065 tcg_temp_free_i32(addr);
9066 goto illegal_op;
a2fdc890
PM
9067 }
9068 if (rs == 15) {
9069 gen_bx(s, tmp);
9ee6e8bb 9070 } else {
a2fdc890 9071 store_reg(s, rs, tmp);
9ee6e8bb
PB
9072 }
9073 } else {
9074 /* Store. */
b0109805 9075 tmp = load_reg(s, rs);
9ee6e8bb 9076 switch (op) {
5a839c0d
PM
9077 case 0:
9078 tcg_gen_qemu_st8(tmp, addr, user);
9079 break;
9080 case 1:
9081 tcg_gen_qemu_st16(tmp, addr, user);
9082 break;
9083 case 2:
9084 tcg_gen_qemu_st32(tmp, addr, user);
9085 break;
2a0308c5 9086 default:
5a839c0d 9087 tcg_temp_free_i32(tmp);
2a0308c5
PM
9088 tcg_temp_free_i32(addr);
9089 goto illegal_op;
b7bcbe95 9090 }
5a839c0d 9091 tcg_temp_free_i32(tmp);
2c0262af 9092 }
9ee6e8bb 9093 if (postinc)
b0109805
PB
9094 tcg_gen_addi_i32(addr, addr, imm);
9095 if (writeback) {
9096 store_reg(s, rn, addr);
9097 } else {
7d1b0095 9098 tcg_temp_free_i32(addr);
b0109805 9099 }
9ee6e8bb
PB
9100 }
9101 break;
9102 default:
9103 goto illegal_op;
2c0262af 9104 }
9ee6e8bb
PB
9105 return 0;
9106illegal_op:
9107 return 1;
2c0262af
FB
9108}
9109
0ecb72a5 9110static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9111{
9112 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9113 int32_t offset;
9114 int i;
39d5492a
PM
9115 TCGv_i32 tmp;
9116 TCGv_i32 tmp2;
9117 TCGv_i32 addr;
99c475ab 9118
9ee6e8bb
PB
9119 if (s->condexec_mask) {
9120 cond = s->condexec_cond;
bedd2912
JB
9121 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9122 s->condlabel = gen_new_label();
9123 gen_test_cc(cond ^ 1, s->condlabel);
9124 s->condjmp = 1;
9125 }
9ee6e8bb
PB
9126 }
9127
d31dd73e 9128 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9129 s->pc += 2;
b5ff1b31 9130
99c475ab
FB
9131 switch (insn >> 12) {
9132 case 0: case 1:
396e467c 9133
99c475ab
FB
9134 rd = insn & 7;
9135 op = (insn >> 11) & 3;
9136 if (op == 3) {
9137 /* add/subtract */
9138 rn = (insn >> 3) & 7;
396e467c 9139 tmp = load_reg(s, rn);
99c475ab
FB
9140 if (insn & (1 << 10)) {
9141 /* immediate */
7d1b0095 9142 tmp2 = tcg_temp_new_i32();
396e467c 9143 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9144 } else {
9145 /* reg */
9146 rm = (insn >> 6) & 7;
396e467c 9147 tmp2 = load_reg(s, rm);
99c475ab 9148 }
9ee6e8bb
PB
9149 if (insn & (1 << 9)) {
9150 if (s->condexec_mask)
396e467c 9151 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9152 else
72485ec4 9153 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9154 } else {
9155 if (s->condexec_mask)
396e467c 9156 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9157 else
72485ec4 9158 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9159 }
7d1b0095 9160 tcg_temp_free_i32(tmp2);
396e467c 9161 store_reg(s, rd, tmp);
99c475ab
FB
9162 } else {
9163 /* shift immediate */
9164 rm = (insn >> 3) & 7;
9165 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9166 tmp = load_reg(s, rm);
9167 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9168 if (!s->condexec_mask)
9169 gen_logic_CC(tmp);
9170 store_reg(s, rd, tmp);
99c475ab
FB
9171 }
9172 break;
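/* Illustrative sketch, not part of translate.c: the 16-bit Thumb
 * "shift by immediate / add-subtract" field layout handled above.
 * Hypothetical helper, printf only; the mnemonics show the flag-setting
 * forms, i.e. the behaviour outside an IT block.
 */
#include <stdint.h>
#include <stdio.h>

static void thumb_shift_addsub_fields(uint16_t insn)
{
    unsigned op = (insn >> 11) & 3;        /* 0=LSL, 1=LSR, 2=ASR, 3=add/sub */
    unsigned rd = insn & 7;
    unsigned rs = (insn >> 3) & 7;         /* rn for add/sub, rm for shifts */

    if (op == 3) {
        unsigned use_imm = (insn >> 10) & 1;   /* 1: 3-bit immediate */
        unsigned sub     = (insn >> 9) & 1;    /* 1: SUB, 0: ADD */
        unsigned val     = (insn >> 6) & 7;    /* imm3 or rm */
        printf("%s r%u, r%u, %s%u\n",
               sub ? "subs" : "adds", rd, rs, use_imm ? "#" : "r", val);
    } else {
        unsigned shift = (insn >> 6) & 0x1f;   /* immediate shift amount */
        static const char *const names[] = { "lsls", "lsrs", "asrs" };
        printf("%s r%u, r%u, #%u\n", names[op], rd, rs, shift);
    }
}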
9173 case 2: case 3:
9174 /* arithmetic large immediate */
9175 op = (insn >> 11) & 3;
9176 rd = (insn >> 8) & 0x7;
396e467c 9177 if (op == 0) { /* mov */
7d1b0095 9178 tmp = tcg_temp_new_i32();
396e467c 9179 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9180 if (!s->condexec_mask)
396e467c
FN
9181 gen_logic_CC(tmp);
9182 store_reg(s, rd, tmp);
9183 } else {
9184 tmp = load_reg(s, rd);
7d1b0095 9185 tmp2 = tcg_temp_new_i32();
396e467c
FN
9186 tcg_gen_movi_i32(tmp2, insn & 0xff);
9187 switch (op) {
9188 case 1: /* cmp */
72485ec4 9189 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9190 tcg_temp_free_i32(tmp);
9191 tcg_temp_free_i32(tmp2);
396e467c
FN
9192 break;
9193 case 2: /* add */
9194 if (s->condexec_mask)
9195 tcg_gen_add_i32(tmp, tmp, tmp2);
9196 else
72485ec4 9197 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9198 tcg_temp_free_i32(tmp2);
396e467c
FN
9199 store_reg(s, rd, tmp);
9200 break;
9201 case 3: /* sub */
9202 if (s->condexec_mask)
9203 tcg_gen_sub_i32(tmp, tmp, tmp2);
9204 else
72485ec4 9205 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9206 tcg_temp_free_i32(tmp2);
396e467c
FN
9207 store_reg(s, rd, tmp);
9208 break;
9209 }
99c475ab 9210 }
99c475ab
FB
9211 break;
9212 case 4:
9213 if (insn & (1 << 11)) {
9214 rd = (insn >> 8) & 7;
5899f386
FB
9215 /* load pc-relative. Bit 1 of PC is ignored. */
9216 val = s->pc + 2 + ((insn & 0xff) * 4);
9217 val &= ~(uint32_t)2;
7d1b0095 9218 addr = tcg_temp_new_i32();
b0109805 9219 tcg_gen_movi_i32(addr, val);
c40c8556
PM
9220 tmp = tcg_temp_new_i32();
9221 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9222 tcg_temp_free_i32(addr);
b0109805 9223 store_reg(s, rd, tmp);
99c475ab
FB
9224 break;
9225 }
9226 if (insn & (1 << 10)) {
9227 /* data processing extended or blx */
9228 rd = (insn & 7) | ((insn >> 4) & 8);
9229 rm = (insn >> 3) & 0xf;
9230 op = (insn >> 8) & 3;
9231 switch (op) {
9232 case 0: /* add */
396e467c
FN
9233 tmp = load_reg(s, rd);
9234 tmp2 = load_reg(s, rm);
9235 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9236 tcg_temp_free_i32(tmp2);
396e467c 9237 store_reg(s, rd, tmp);
99c475ab
FB
9238 break;
9239 case 1: /* cmp */
396e467c
FN
9240 tmp = load_reg(s, rd);
9241 tmp2 = load_reg(s, rm);
72485ec4 9242 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9243 tcg_temp_free_i32(tmp2);
9244 tcg_temp_free_i32(tmp);
99c475ab
FB
9245 break;
9246 case 2: /* mov/cpy */
396e467c
FN
9247 tmp = load_reg(s, rm);
9248 store_reg(s, rd, tmp);
99c475ab
FB
9249 break;
 9250            case 3: /* branch [and link] exchange thumb register */
b0109805 9251 tmp = load_reg(s, rm);
99c475ab 9252 if (insn & (1 << 7)) {
be5e7a76 9253 ARCH(5);
99c475ab 9254 val = (uint32_t)s->pc | 1;
7d1b0095 9255 tmp2 = tcg_temp_new_i32();
b0109805
PB
9256 tcg_gen_movi_i32(tmp2, val);
9257 store_reg(s, 14, tmp2);
99c475ab 9258 }
be5e7a76 9259 /* already thumb, no need to check */
d9ba4830 9260 gen_bx(s, tmp);
99c475ab
FB
9261 break;
9262 }
9263 break;
9264 }
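/* Illustrative sketch, not part of translate.c: how the "data processing
 * extended or blx" encoding above reassembles a 4-bit register number from
 * the split rd field, and the link value written for BLX.  Helper names
 * are hypothetical.
 */
#include <stdint.h>

static unsigned thumb_hi_rd(uint16_t insn)
{
    /* Bit 7 of the instruction is bit 3 of Rd; bits [2:0] are the rest. */
    return (insn & 7) | ((insn >> 4) & 8);
}

static uint32_t thumb_blx_link_value(uint32_t insn_addr)
{
    /* LR holds the address of the following 16-bit insn, with bit 0 set
     * to mark Thumb state (matches "val = (uint32_t)s->pc | 1" above,
     * since s->pc has already been advanced past this instruction). */
    return (insn_addr + 2) | 1;
}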
9265
9266 /* data processing register */
9267 rd = insn & 7;
9268 rm = (insn >> 3) & 7;
9269 op = (insn >> 6) & 0xf;
9270 if (op == 2 || op == 3 || op == 4 || op == 7) {
9271 /* the shift/rotate ops want the operands backwards */
9272 val = rm;
9273 rm = rd;
9274 rd = val;
9275 val = 1;
9276 } else {
9277 val = 0;
9278 }
9279
396e467c 9280 if (op == 9) { /* neg */
7d1b0095 9281 tmp = tcg_temp_new_i32();
396e467c
FN
9282 tcg_gen_movi_i32(tmp, 0);
9283 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9284 tmp = load_reg(s, rd);
9285 } else {
39d5492a 9286 TCGV_UNUSED_I32(tmp);
396e467c 9287 }
99c475ab 9288
396e467c 9289 tmp2 = load_reg(s, rm);
5899f386 9290 switch (op) {
99c475ab 9291 case 0x0: /* and */
396e467c 9292 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9293 if (!s->condexec_mask)
396e467c 9294 gen_logic_CC(tmp);
99c475ab
FB
9295 break;
9296 case 0x1: /* eor */
396e467c 9297 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9298 if (!s->condexec_mask)
396e467c 9299 gen_logic_CC(tmp);
99c475ab
FB
9300 break;
9301 case 0x2: /* lsl */
9ee6e8bb 9302 if (s->condexec_mask) {
365af80e 9303 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9304 } else {
9ef39277 9305 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9306 gen_logic_CC(tmp2);
9ee6e8bb 9307 }
99c475ab
FB
9308 break;
9309 case 0x3: /* lsr */
9ee6e8bb 9310 if (s->condexec_mask) {
365af80e 9311 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9312 } else {
9ef39277 9313 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9314 gen_logic_CC(tmp2);
9ee6e8bb 9315 }
99c475ab
FB
9316 break;
9317 case 0x4: /* asr */
9ee6e8bb 9318 if (s->condexec_mask) {
365af80e 9319 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9320 } else {
9ef39277 9321 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9322 gen_logic_CC(tmp2);
9ee6e8bb 9323 }
99c475ab
FB
9324 break;
9325 case 0x5: /* adc */
49b4c31e 9326 if (s->condexec_mask) {
396e467c 9327 gen_adc(tmp, tmp2);
49b4c31e
RH
9328 } else {
9329 gen_adc_CC(tmp, tmp, tmp2);
9330 }
99c475ab
FB
9331 break;
9332 case 0x6: /* sbc */
2de68a49 9333 if (s->condexec_mask) {
396e467c 9334 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9335 } else {
9336 gen_sbc_CC(tmp, tmp, tmp2);
9337 }
99c475ab
FB
9338 break;
9339 case 0x7: /* ror */
9ee6e8bb 9340 if (s->condexec_mask) {
f669df27
AJ
9341 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9342 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9343 } else {
9ef39277 9344 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9345 gen_logic_CC(tmp2);
9ee6e8bb 9346 }
99c475ab
FB
9347 break;
9348 case 0x8: /* tst */
396e467c
FN
9349 tcg_gen_and_i32(tmp, tmp, tmp2);
9350 gen_logic_CC(tmp);
99c475ab 9351 rd = 16;
5899f386 9352 break;
99c475ab 9353 case 0x9: /* neg */
9ee6e8bb 9354 if (s->condexec_mask)
396e467c 9355 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9356 else
72485ec4 9357 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9358 break;
9359 case 0xa: /* cmp */
72485ec4 9360 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9361 rd = 16;
9362 break;
9363 case 0xb: /* cmn */
72485ec4 9364 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9365 rd = 16;
9366 break;
9367 case 0xc: /* orr */
396e467c 9368 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9369 if (!s->condexec_mask)
396e467c 9370 gen_logic_CC(tmp);
99c475ab
FB
9371 break;
9372 case 0xd: /* mul */
7b2919a0 9373 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9374 if (!s->condexec_mask)
396e467c 9375 gen_logic_CC(tmp);
99c475ab
FB
9376 break;
9377 case 0xe: /* bic */
f669df27 9378 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9379 if (!s->condexec_mask)
396e467c 9380 gen_logic_CC(tmp);
99c475ab
FB
9381 break;
9382 case 0xf: /* mvn */
396e467c 9383 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9384 if (!s->condexec_mask)
396e467c 9385 gen_logic_CC(tmp2);
99c475ab 9386 val = 1;
5899f386 9387 rm = rd;
99c475ab
FB
9388 break;
9389 }
9390 if (rd != 16) {
396e467c
FN
9391 if (val) {
9392 store_reg(s, rm, tmp2);
9393 if (op != 0xf)
7d1b0095 9394 tcg_temp_free_i32(tmp);
396e467c
FN
9395 } else {
9396 store_reg(s, rd, tmp);
7d1b0095 9397 tcg_temp_free_i32(tmp2);
396e467c
FN
9398 }
9399 } else {
7d1b0095
PM
9400 tcg_temp_free_i32(tmp);
9401 tcg_temp_free_i32(tmp2);
99c475ab
FB
9402 }
9403 break;
9404
9405 case 5:
9406 /* load/store register offset. */
9407 rd = insn & 7;
9408 rn = (insn >> 3) & 7;
9409 rm = (insn >> 6) & 7;
9410 op = (insn >> 9) & 7;
b0109805 9411 addr = load_reg(s, rn);
b26eefb6 9412 tmp = load_reg(s, rm);
b0109805 9413 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9414 tcg_temp_free_i32(tmp);
99c475ab 9415
c40c8556 9416 if (op < 3) { /* store */
b0109805 9417 tmp = load_reg(s, rd);
c40c8556
PM
9418 } else {
9419 tmp = tcg_temp_new_i32();
9420 }
99c475ab
FB
9421
9422 switch (op) {
9423 case 0: /* str */
c40c8556 9424 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9425 break;
9426 case 1: /* strh */
c40c8556 9427 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9428 break;
9429 case 2: /* strb */
c40c8556 9430 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9431 break;
9432 case 3: /* ldrsb */
c40c8556 9433 tcg_gen_qemu_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
9434 break;
9435 case 4: /* ldr */
c40c8556 9436 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9437 break;
9438 case 5: /* ldrh */
c40c8556 9439 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
9440 break;
9441 case 6: /* ldrb */
c40c8556 9442 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
9443 break;
9444 case 7: /* ldrsh */
c40c8556 9445 tcg_gen_qemu_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
9446 break;
9447 }
c40c8556 9448 if (op >= 3) { /* load */
b0109805 9449 store_reg(s, rd, tmp);
c40c8556
PM
9450 } else {
9451 tcg_temp_free_i32(tmp);
9452 }
7d1b0095 9453 tcg_temp_free_i32(addr);
99c475ab
FB
9454 break;
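/* Illustrative sketch, not part of translate.c: the op field (bits [11:9])
 * of the Thumb load/store register-offset format handled above, as a small
 * table.  op < 3 are stores, op >= 3 are loads; the table is hypothetical
 * but follows the switch above.
 */
#include <stdint.h>

struct ldst_kind {
    uint8_t load;     /* 1 = load, 0 = store */
    uint8_t size;     /* access size in bytes */
    uint8_t sign;     /* 1 = sign-extend the loaded value */
};

static const struct ldst_kind thumb_ldst_reg_ops[8] = {
    [0] = { 0, 4, 0 },   /* str   */
    [1] = { 0, 2, 0 },   /* strh  */
    [2] = { 0, 1, 0 },   /* strb  */
    [3] = { 1, 1, 1 },   /* ldrsb */
    [4] = { 1, 4, 0 },   /* ldr   */
    [5] = { 1, 2, 0 },   /* ldrh  */
    [6] = { 1, 1, 0 },   /* ldrb  */
    [7] = { 1, 2, 1 },   /* ldrsh */
};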
9455
9456 case 6:
9457 /* load/store word immediate offset */
9458 rd = insn & 7;
9459 rn = (insn >> 3) & 7;
b0109805 9460 addr = load_reg(s, rn);
99c475ab 9461 val = (insn >> 4) & 0x7c;
b0109805 9462 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9463
9464 if (insn & (1 << 11)) {
9465 /* load */
c40c8556
PM
9466 tmp = tcg_temp_new_i32();
9467 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9468 store_reg(s, rd, tmp);
99c475ab
FB
9469 } else {
9470 /* store */
b0109805 9471 tmp = load_reg(s, rd);
c40c8556
PM
9472 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9473 tcg_temp_free_i32(tmp);
99c475ab 9474 }
7d1b0095 9475 tcg_temp_free_i32(addr);
99c475ab
FB
9476 break;
9477
9478 case 7:
9479 /* load/store byte immediate offset */
9480 rd = insn & 7;
9481 rn = (insn >> 3) & 7;
b0109805 9482 addr = load_reg(s, rn);
99c475ab 9483 val = (insn >> 6) & 0x1f;
b0109805 9484 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9485
9486 if (insn & (1 << 11)) {
9487 /* load */
c40c8556
PM
9488 tmp = tcg_temp_new_i32();
9489 tcg_gen_qemu_ld8u(tmp, addr, IS_USER(s));
b0109805 9490 store_reg(s, rd, tmp);
99c475ab
FB
9491 } else {
9492 /* store */
b0109805 9493 tmp = load_reg(s, rd);
c40c8556
PM
9494 tcg_gen_qemu_st8(tmp, addr, IS_USER(s));
9495 tcg_temp_free_i32(tmp);
99c475ab 9496 }
7d1b0095 9497 tcg_temp_free_i32(addr);
99c475ab
FB
9498 break;
9499
9500 case 8:
9501 /* load/store halfword immediate offset */
9502 rd = insn & 7;
9503 rn = (insn >> 3) & 7;
b0109805 9504 addr = load_reg(s, rn);
99c475ab 9505 val = (insn >> 5) & 0x3e;
b0109805 9506 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9507
9508 if (insn & (1 << 11)) {
9509 /* load */
c40c8556
PM
9510 tmp = tcg_temp_new_i32();
9511 tcg_gen_qemu_ld16u(tmp, addr, IS_USER(s));
b0109805 9512 store_reg(s, rd, tmp);
99c475ab
FB
9513 } else {
9514 /* store */
b0109805 9515 tmp = load_reg(s, rd);
c40c8556
PM
9516 tcg_gen_qemu_st16(tmp, addr, IS_USER(s));
9517 tcg_temp_free_i32(tmp);
99c475ab 9518 }
7d1b0095 9519 tcg_temp_free_i32(addr);
99c475ab
FB
9520 break;
9521
9522 case 9:
9523 /* load/store from stack */
9524 rd = (insn >> 8) & 7;
b0109805 9525 addr = load_reg(s, 13);
99c475ab 9526 val = (insn & 0xff) * 4;
b0109805 9527 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9528
9529 if (insn & (1 << 11)) {
9530 /* load */
c40c8556
PM
9531 tmp = tcg_temp_new_i32();
9532 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9533 store_reg(s, rd, tmp);
99c475ab
FB
9534 } else {
9535 /* store */
b0109805 9536 tmp = load_reg(s, rd);
c40c8556
PM
9537 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9538 tcg_temp_free_i32(tmp);
99c475ab 9539 }
7d1b0095 9540 tcg_temp_free_i32(addr);
99c475ab
FB
9541 break;
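/* Illustrative sketch, not part of translate.c: how the immediate offset
 * is scaled in the four load/store-immediate formats handled above
 * (cases 6..9 of the outer switch on insn >> 12).  Hypothetical helper.
 */
#include <stdint.h>

static uint32_t thumb_ldst_imm_offset(unsigned fmt, uint16_t insn)
{
    switch (fmt) {
    case 6: return (insn >> 4) & 0x7c;   /* word: imm5 * 4, pre-scaled */
    case 7: return (insn >> 6) & 0x1f;   /* byte: imm5, unscaled       */
    case 8: return (insn >> 5) & 0x3e;   /* halfword: imm5 * 2         */
    case 9: return (insn & 0xff) * 4;    /* SP-relative: imm8 * 4      */
    default: return 0;                   /* not an immediate-offset form */
    }
}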
9542
9543 case 10:
9544 /* add to high reg */
9545 rd = (insn >> 8) & 7;
5899f386
FB
9546 if (insn & (1 << 11)) {
9547 /* SP */
5e3f878a 9548 tmp = load_reg(s, 13);
5899f386
FB
9549 } else {
9550 /* PC. bit 1 is ignored. */
7d1b0095 9551 tmp = tcg_temp_new_i32();
5e3f878a 9552 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9553 }
99c475ab 9554 val = (insn & 0xff) * 4;
5e3f878a
PB
9555 tcg_gen_addi_i32(tmp, tmp, val);
9556 store_reg(s, rd, tmp);
99c475ab
FB
9557 break;
9558
9559 case 11:
9560 /* misc */
9561 op = (insn >> 8) & 0xf;
9562 switch (op) {
9563 case 0:
9564 /* adjust stack pointer */
b26eefb6 9565 tmp = load_reg(s, 13);
99c475ab
FB
9566 val = (insn & 0x7f) * 4;
9567 if (insn & (1 << 7))
6a0d8a1d 9568 val = -(int32_t)val;
b26eefb6
PB
9569 tcg_gen_addi_i32(tmp, tmp, val);
9570 store_reg(s, 13, tmp);
99c475ab
FB
9571 break;
9572
9ee6e8bb
PB
9573 case 2: /* sign/zero extend. */
9574 ARCH(6);
9575 rd = insn & 7;
9576 rm = (insn >> 3) & 7;
b0109805 9577 tmp = load_reg(s, rm);
9ee6e8bb 9578 switch ((insn >> 6) & 3) {
b0109805
PB
9579 case 0: gen_sxth(tmp); break;
9580 case 1: gen_sxtb(tmp); break;
9581 case 2: gen_uxth(tmp); break;
9582 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9583 }
b0109805 9584 store_reg(s, rd, tmp);
9ee6e8bb 9585 break;
99c475ab
FB
9586 case 4: case 5: case 0xc: case 0xd:
9587 /* push/pop */
b0109805 9588 addr = load_reg(s, 13);
5899f386
FB
9589 if (insn & (1 << 8))
9590 offset = 4;
99c475ab 9591 else
5899f386
FB
9592 offset = 0;
9593 for (i = 0; i < 8; i++) {
9594 if (insn & (1 << i))
9595 offset += 4;
9596 }
9597 if ((insn & (1 << 11)) == 0) {
b0109805 9598 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9599 }
99c475ab
FB
9600 for (i = 0; i < 8; i++) {
9601 if (insn & (1 << i)) {
9602 if (insn & (1 << 11)) {
9603 /* pop */
c40c8556
PM
9604 tmp = tcg_temp_new_i32();
9605 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
b0109805 9606 store_reg(s, i, tmp);
99c475ab
FB
9607 } else {
9608 /* push */
b0109805 9609 tmp = load_reg(s, i);
c40c8556
PM
9610 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9611 tcg_temp_free_i32(tmp);
99c475ab 9612 }
5899f386 9613 /* advance to the next address. */
b0109805 9614 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9615 }
9616 }
39d5492a 9617 TCGV_UNUSED_I32(tmp);
99c475ab
FB
9618 if (insn & (1 << 8)) {
9619 if (insn & (1 << 11)) {
9620 /* pop pc */
c40c8556
PM
9621 tmp = tcg_temp_new_i32();
9622 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9623 /* don't set the pc until the rest of the instruction
9624 has completed */
9625 } else {
9626 /* push lr */
b0109805 9627 tmp = load_reg(s, 14);
c40c8556
PM
9628 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9629 tcg_temp_free_i32(tmp);
99c475ab 9630 }
b0109805 9631 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9632 }
5899f386 9633 if ((insn & (1 << 11)) == 0) {
b0109805 9634 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9635 }
99c475ab 9636 /* write back the new stack pointer */
b0109805 9637 store_reg(s, 13, addr);
99c475ab 9638 /* set the new PC value */
be5e7a76
DES
9639 if ((insn & 0x0900) == 0x0900) {
9640 store_reg_from_load(env, s, 15, tmp);
9641 }
99c475ab
FB
9642 break;
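/* Illustrative sketch, not part of translate.c: the stack adjustment
 * computed by the push/pop code above -- 4 bytes per register in the low
 * register list, plus 4 if bit 8 (LR for push, PC for pop) is set.
 * Hypothetical helper.
 */
#include <stdint.h>

static uint32_t thumb_push_pop_bytes(uint16_t insn)
{
    uint32_t bytes = (insn & (1 << 8)) ? 4 : 0;   /* LR/PC slot */
    for (int i = 0; i < 8; i++) {
        if (insn & (1 << i)) {
            bytes += 4;
        }
    }
    return bytes;   /* push pre-decrements SP by this; pop adds it back */
}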
9643
9ee6e8bb
PB
 9644        case 1: case 3: case 9: case 11: /* cbz/cbnz */
9645 rm = insn & 7;
d9ba4830 9646 tmp = load_reg(s, rm);
9ee6e8bb
PB
9647 s->condlabel = gen_new_label();
9648 s->condjmp = 1;
9649 if (insn & (1 << 11))
cb63669a 9650 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9651 else
cb63669a 9652 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9653 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9654 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9655 val = (uint32_t)s->pc + 2;
9656 val += offset;
9657 gen_jmp(s, val);
9658 break;
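/* Illustrative sketch, not part of translate.c: the CB{N}Z branch target
 * assembled above.  insn_addr is the address of the 16-bit instruction;
 * s->pc in the code above already points past it, hence the "+ 2" there.
 * Hypothetical helper.
 */
#include <stdint.h>

static uint32_t thumb_cbz_target(uint32_t insn_addr, uint16_t insn)
{
    /* Bits [7:3] give imm5 (already scaled by 2 here), bit 9 supplies the
     * top bit; the offset is counted in halfwords. */
    uint32_t offset = ((insn & 0xf8) >> 2) | ((insn & 0x200) >> 3);
    return insn_addr + 4 + offset;
}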
9659
9660 case 15: /* IT, nop-hint. */
9661 if ((insn & 0xf) == 0) {
9662 gen_nop_hint(s, (insn >> 4) & 0xf);
9663 break;
9664 }
9665 /* If Then. */
9666 s->condexec_cond = (insn >> 4) & 0xe;
9667 s->condexec_mask = insn & 0x1f;
9668 /* No actual code generated for this insn, just setup state. */
9669 break;
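/* Illustrative sketch, not part of translate.c: how the IT instruction's
 * fields map onto the condexec_cond/condexec_mask pair set above.
 * insn[7:4] is firstcond; insn[4:0] (firstcond[0] followed by the 4-bit
 * mask) becomes the 5-bit mask, from which the advance code later shifts
 * one bit at a time into the low bit of the condition.  Names are
 * hypothetical.
 */
#include <stdint.h>

struct it_state {
    uint32_t cond;   /* base condition, low bit stripped */
    uint32_t mask;   /* remaining-slot mask, 5 bits */
};

static struct it_state thumb_decode_it(uint16_t insn)
{
    struct it_state st;
    st.cond = (insn >> 4) & 0xe;   /* firstcond with bit 0 cleared */
    st.mask = insn & 0x1f;         /* firstcond[0] : mask[3:0]     */
    return st;
}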
9670
06c949e6 9671 case 0xe: /* bkpt */
be5e7a76 9672 ARCH(5);
bc4a0de0 9673 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9674 break;
9675
9ee6e8bb
PB
9676 case 0xa: /* rev */
9677 ARCH(6);
9678 rn = (insn >> 3) & 0x7;
9679 rd = insn & 0x7;
b0109805 9680 tmp = load_reg(s, rn);
9ee6e8bb 9681 switch ((insn >> 6) & 3) {
66896cb8 9682 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9683 case 1: gen_rev16(tmp); break;
9684 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9685 default: goto illegal_op;
9686 }
b0109805 9687 store_reg(s, rd, tmp);
9ee6e8bb
PB
9688 break;
9689
d9e028c1
PM
9690 case 6:
9691 switch ((insn >> 5) & 7) {
9692 case 2:
9693 /* setend */
9694 ARCH(6);
10962fd5
PM
9695 if (((insn >> 3) & 1) != s->bswap_code) {
9696 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9697 goto illegal_op;
9698 }
9ee6e8bb 9699 break;
d9e028c1
PM
9700 case 3:
9701 /* cps */
9702 ARCH(6);
9703 if (IS_USER(s)) {
9704 break;
8984bd2e 9705 }
d9e028c1
PM
9706 if (IS_M(env)) {
9707 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9708 /* FAULTMASK */
9709 if (insn & 1) {
9710 addr = tcg_const_i32(19);
9711 gen_helper_v7m_msr(cpu_env, addr, tmp);
9712 tcg_temp_free_i32(addr);
9713 }
9714 /* PRIMASK */
9715 if (insn & 2) {
9716 addr = tcg_const_i32(16);
9717 gen_helper_v7m_msr(cpu_env, addr, tmp);
9718 tcg_temp_free_i32(addr);
9719 }
9720 tcg_temp_free_i32(tmp);
9721 gen_lookup_tb(s);
9722 } else {
9723 if (insn & (1 << 4)) {
9724 shift = CPSR_A | CPSR_I | CPSR_F;
9725 } else {
9726 shift = 0;
9727 }
9728 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9729 }
d9e028c1
PM
9730 break;
9731 default:
9732 goto undef;
9ee6e8bb
PB
9733 }
9734 break;
9735
99c475ab
FB
9736 default:
9737 goto undef;
9738 }
9739 break;
9740
9741 case 12:
a7d3970d 9742 {
99c475ab 9743 /* load/store multiple */
39d5492a
PM
9744 TCGv_i32 loaded_var;
9745 TCGV_UNUSED_I32(loaded_var);
99c475ab 9746 rn = (insn >> 8) & 0x7;
b0109805 9747 addr = load_reg(s, rn);
99c475ab
FB
9748 for (i = 0; i < 8; i++) {
9749 if (insn & (1 << i)) {
99c475ab
FB
9750 if (insn & (1 << 11)) {
9751 /* load */
c40c8556
PM
9752 tmp = tcg_temp_new_i32();
9753 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
9754 if (i == rn) {
9755 loaded_var = tmp;
9756 } else {
9757 store_reg(s, i, tmp);
9758 }
99c475ab
FB
9759 } else {
9760 /* store */
b0109805 9761 tmp = load_reg(s, i);
c40c8556
PM
9762 tcg_gen_qemu_st32(tmp, addr, IS_USER(s));
9763 tcg_temp_free_i32(tmp);
99c475ab 9764 }
5899f386 9765 /* advance to the next address */
b0109805 9766 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9767 }
9768 }
b0109805 9769 if ((insn & (1 << rn)) == 0) {
a7d3970d 9770 /* base reg not in list: base register writeback */
b0109805
PB
9771 store_reg(s, rn, addr);
9772 } else {
a7d3970d
PM
9773 /* base reg in list: if load, complete it now */
9774 if (insn & (1 << 11)) {
9775 store_reg(s, rn, loaded_var);
9776 }
7d1b0095 9777 tcg_temp_free_i32(addr);
b0109805 9778 }
99c475ab 9779 break;
a7d3970d 9780 }
99c475ab
FB
9781 case 13:
9782 /* conditional branch or swi */
9783 cond = (insn >> 8) & 0xf;
9784 if (cond == 0xe)
9785 goto undef;
9786
9787 if (cond == 0xf) {
9788 /* swi */
422ebf69 9789 gen_set_pc_im(s->pc);
9ee6e8bb 9790 s->is_jmp = DISAS_SWI;
99c475ab
FB
9791 break;
9792 }
9793 /* generate a conditional jump to next instruction */
e50e6a20 9794 s->condlabel = gen_new_label();
d9ba4830 9795 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9796 s->condjmp = 1;
99c475ab
FB
9797
9798 /* jump to the offset */
5899f386 9799 val = (uint32_t)s->pc + 2;
99c475ab 9800 offset = ((int32_t)insn << 24) >> 24;
5899f386 9801 val += offset << 1;
8aaca4c0 9802 gen_jmp(s, val);
99c475ab
FB
9803 break;
9804
9805 case 14:
358bf29e 9806 if (insn & (1 << 11)) {
9ee6e8bb
PB
9807 if (disas_thumb2_insn(env, s, insn))
9808 goto undef32;
358bf29e
PB
9809 break;
9810 }
9ee6e8bb 9811 /* unconditional branch */
99c475ab
FB
9812 val = (uint32_t)s->pc;
9813 offset = ((int32_t)insn << 21) >> 21;
9814 val += (offset << 1) + 2;
8aaca4c0 9815 gen_jmp(s, val);
99c475ab
FB
9816 break;
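/* Illustrative sketch, not part of translate.c: the sign extension used
 * by the two 16-bit branch forms above.  Both targets reduce to the
 * instruction address + 4 (the Thumb PC read value) plus a halfword
 * offset; s->pc already points past the instruction in the code above,
 * hence the differing "+ 2" terms there.  Hypothetical helpers.
 */
#include <stdint.h>

static uint32_t thumb_cond_branch_target(uint32_t insn_addr, uint16_t insn)
{
    int32_t offset = (int32_t)((uint32_t)insn << 24) >> 24;  /* imm8 */
    return insn_addr + 4 + offset * 2;
}

static uint32_t thumb_uncond_branch_target(uint32_t insn_addr, uint16_t insn)
{
    int32_t offset = (int32_t)((uint32_t)insn << 21) >> 21;  /* imm11 */
    return insn_addr + 4 + offset * 2;
}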
9817
9818 case 15:
9ee6e8bb 9819 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9820 goto undef32;
9ee6e8bb 9821 break;
99c475ab
FB
9822 }
9823 return;
9ee6e8bb 9824undef32:
bc4a0de0 9825 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9826 return;
9827illegal_op:
99c475ab 9828undef:
bc4a0de0 9829 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9830}
9831
2c0262af
FB
9832/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9833 basic block 'tb'. If search_pc is TRUE, also generate PC
9834 information for each intermediate instruction. */
0ecb72a5 9835static inline void gen_intermediate_code_internal(CPUARMState *env,
2cfc5f17
TS
9836 TranslationBlock *tb,
9837 int search_pc)
2c0262af
FB
9838{
9839 DisasContext dc1, *dc = &dc1;
a1d1bb31 9840 CPUBreakpoint *bp;
2c0262af
FB
9841 uint16_t *gen_opc_end;
9842 int j, lj;
0fa85d43 9843 target_ulong pc_start;
b5ff1b31 9844 uint32_t next_page_start;
2e70f6ef
PB
9845 int num_insns;
9846 int max_insns;
3b46e624 9847
2c0262af 9848 /* generate intermediate code */
0fa85d43 9849 pc_start = tb->pc;
3b46e624 9850
2c0262af
FB
9851 dc->tb = tb;
9852
92414b31 9853 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
9854
9855 dc->is_jmp = DISAS_NEXT;
9856 dc->pc = pc_start;
8aaca4c0 9857 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 9858 dc->condjmp = 0;
7204ab88 9859 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 9860 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
9861 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
9862 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 9863#if !defined(CONFIG_USER_ONLY)
61f74d6a 9864 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 9865#endif
5df8bac1 9866 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
9867 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
9868 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
9869 cpu_F0s = tcg_temp_new_i32();
9870 cpu_F1s = tcg_temp_new_i32();
9871 cpu_F0d = tcg_temp_new_i64();
9872 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
9873 cpu_V0 = cpu_F0d;
9874 cpu_V1 = cpu_F1d;
e677137d 9875 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 9876 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 9877 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 9878 lj = -1;
2e70f6ef
PB
9879 num_insns = 0;
9880 max_insns = tb->cflags & CF_COUNT_MASK;
9881 if (max_insns == 0)
9882 max_insns = CF_COUNT_MASK;
9883
806f352d 9884 gen_tb_start();
e12ce78d 9885
3849902c
PM
9886 tcg_clear_temp_count();
9887
e12ce78d
PM
9888 /* A note on handling of the condexec (IT) bits:
9889 *
9890 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 9891 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 9892 * (1) if the condexec bits are not already zero then we write
0ecb72a5 9893 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
9894 * to do it at the end of the block. (For example if we don't do this
9895 * it's hard to identify whether we can safely skip writing condexec
9896 * at the end of the TB, which we definitely want to do for the case
9897 * where a TB doesn't do anything with the IT state at all.)
9898 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 9899 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
9900 * This is done both for leaving the TB at the end, and for leaving
9901 * it because of an exception we know will happen, which is done in
9902 * gen_exception_insn(). The latter is necessary because we need to
9903 * leave the TB with the PC/IT state just prior to execution of the
9904 * instruction which caused the exception.
9905 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 9906 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
9907 * This is handled in the same way as restoration of the
9908 * PC in these situations: we will be called again with search_pc=1
9909 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
9910 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
9911 * this to restore the condexec bits.
e12ce78d
PM
9912 *
9913 * Note that there are no instructions which can read the condexec
9914 * bits, and none which can write non-static values to them, so
0ecb72a5 9915 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
9916 * middle of a TB.
9917 */
9918
9ee6e8bb
PB
9919 /* Reset the conditional execution bits immediately. This avoids
9920 complications trying to do it at the end of the block. */
98eac7ca 9921 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 9922 {
39d5492a 9923 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 9924 tcg_gen_movi_i32(tmp, 0);
d9ba4830 9925 store_cpu_field(tmp, condexec_bits);
8f01245e 9926 }
2c0262af 9927 do {
fbb4a2e3
PB
9928#ifdef CONFIG_USER_ONLY
9929 /* Intercept jump to the magic kernel page. */
9930 if (dc->pc >= 0xffff0000) {
9931 /* We always get here via a jump, so know we are not in a
9932 conditional execution block. */
9933 gen_exception(EXCP_KERNEL_TRAP);
9934 dc->is_jmp = DISAS_UPDATE;
9935 break;
9936 }
9937#else
9ee6e8bb
PB
9938 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
9939 /* We always get here via a jump, so know we are not in a
9940 conditional execution block. */
d9ba4830 9941 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
9942 dc->is_jmp = DISAS_UPDATE;
9943 break;
9ee6e8bb
PB
9944 }
9945#endif
9946
72cf2d4f
BS
9947 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
9948 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 9949 if (bp->pc == dc->pc) {
bc4a0de0 9950 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
9951 /* Advance PC so that clearing the breakpoint will
9952 invalidate this TB. */
9953 dc->pc += 2;
9954 goto done_generating;
1fddef4b
FB
9955 break;
9956 }
9957 }
9958 }
2c0262af 9959 if (search_pc) {
92414b31 9960 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
9961 if (lj < j) {
9962 lj++;
9963 while (lj < j)
ab1103de 9964 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 9965 }
25983cad 9966 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 9967 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 9968 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 9969 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 9970 }
e50e6a20 9971
2e70f6ef
PB
9972 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
9973 gen_io_start();
9974
fdefe51c 9975 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
9976 tcg_gen_debug_insn_start(dc->pc);
9977 }
9978
7204ab88 9979 if (dc->thumb) {
9ee6e8bb
PB
9980 disas_thumb_insn(env, dc);
9981 if (dc->condexec_mask) {
9982 dc->condexec_cond = (dc->condexec_cond & 0xe)
9983 | ((dc->condexec_mask >> 4) & 1);
9984 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
9985 if (dc->condexec_mask == 0) {
9986 dc->condexec_cond = 0;
9987 }
9988 }
9989 } else {
9990 disas_arm_insn(env, dc);
9991 }
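/* Illustrative sketch, not part of translate.c: the ITSTATE advance
 * performed just above after each Thumb instruction in an IT block,
 * pulled out as a standalone helper over the (cond, mask) pair.
 * Hypothetical names.
 */
#include <stdint.h>

static void it_advance(uint32_t *cond, uint32_t *mask)
{
    if (*mask) {
        /* The next slot's condition LSB is the top bit of the mask. */
        *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
        *mask = (*mask << 1) & 0x1f;
        if (*mask == 0) {
            *cond = 0;   /* IT block finished */
        }
    }
}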
e50e6a20
FB
9992
9993 if (dc->condjmp && !dc->is_jmp) {
9994 gen_set_label(dc->condlabel);
9995 dc->condjmp = 0;
9996 }
3849902c
PM
9997
9998 if (tcg_check_temp_count()) {
9999 fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
10000 }
10001
aaf2d97d 10002 /* Translation stops when a conditional branch is encountered.
e50e6a20 10003 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10004 * Also stop translation when a page boundary is reached. This
bf20dc07 10005 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10006        num_insns++;
efd7f486 10007 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
1fddef4b 10008 !env->singlestep_enabled &&
1b530a6d 10009 !singlestep &&
2e70f6ef
PB
10010 dc->pc < next_page_start &&
10011 num_insns < max_insns);
10012
10013 if (tb->cflags & CF_LAST_IO) {
10014 if (dc->condjmp) {
10015 /* FIXME: This can theoretically happen with self-modifying
10016 code. */
10017 cpu_abort(env, "IO on conditional branch instruction");
10018 }
10019 gen_io_end();
10020 }
9ee6e8bb 10021
b5ff1b31 10022 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10023 instruction was a conditional branch or trap, and the PC has
10024 already been written. */
551bd27f 10025 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 10026 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10027 if (dc->condjmp) {
9ee6e8bb
PB
10028 gen_set_condexec(dc);
10029 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10030 gen_exception(EXCP_SWI);
9ee6e8bb 10031 } else {
d9ba4830 10032 gen_exception(EXCP_DEBUG);
9ee6e8bb 10033 }
e50e6a20
FB
10034 gen_set_label(dc->condlabel);
10035 }
10036 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 10037 gen_set_pc_im(dc->pc);
e50e6a20 10038 dc->condjmp = 0;
8aaca4c0 10039 }
9ee6e8bb
PB
10040 gen_set_condexec(dc);
10041 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10042 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10043 } else {
10044 /* FIXME: Single stepping a WFI insn will not halt
10045 the CPU. */
d9ba4830 10046 gen_exception(EXCP_DEBUG);
9ee6e8bb 10047 }
8aaca4c0 10048 } else {
9ee6e8bb
PB
10049 /* While branches must always occur at the end of an IT block,
10050 there are a few other things that can cause us to terminate
65626741 10051 the TB in the middle of an IT block:
9ee6e8bb
PB
10052 - Exception generating instructions (bkpt, swi, undefined).
10053 - Page boundaries.
10054 - Hardware watchpoints.
10055 Hardware breakpoints have already been handled and skip this code.
10056 */
10057 gen_set_condexec(dc);
8aaca4c0 10058 switch(dc->is_jmp) {
8aaca4c0 10059 case DISAS_NEXT:
6e256c93 10060 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10061 break;
10062 default:
10063 case DISAS_JUMP:
10064 case DISAS_UPDATE:
10065 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10066 tcg_gen_exit_tb(0);
8aaca4c0
FB
10067 break;
10068 case DISAS_TB_JUMP:
10069 /* nothing more to generate */
10070 break;
9ee6e8bb 10071 case DISAS_WFI:
1ce94f81 10072 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10073 break;
10074 case DISAS_SWI:
d9ba4830 10075 gen_exception(EXCP_SWI);
9ee6e8bb 10076 break;
8aaca4c0 10077 }
e50e6a20
FB
10078 if (dc->condjmp) {
10079 gen_set_label(dc->condlabel);
9ee6e8bb 10080 gen_set_condexec(dc);
6e256c93 10081 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10082 dc->condjmp = 0;
10083 }
2c0262af 10084 }
2e70f6ef 10085
9ee6e8bb 10086done_generating:
806f352d 10087 gen_tb_end(tb, num_insns);
efd7f486 10088 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10089
10090#ifdef DEBUG_DISAS
8fec2b8c 10091 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10092 qemu_log("----------------\n");
10093 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10094 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10095 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10096 qemu_log("\n");
2c0262af
FB
10097 }
10098#endif
b5ff1b31 10099 if (search_pc) {
92414b31 10100 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10101 lj++;
10102 while (lj <= j)
ab1103de 10103 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10104 } else {
2c0262af 10105 tb->size = dc->pc - pc_start;
2e70f6ef 10106 tb->icount = num_insns;
b5ff1b31 10107 }
2c0262af
FB
10108}
10109
0ecb72a5 10110void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10111{
2cfc5f17 10112 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
10113}
10114
0ecb72a5 10115void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10116{
2cfc5f17 10117 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
10118}
10119
b5ff1b31
FB
10120static const char *cpu_mode_names[16] = {
10121 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10122 "???", "???", "???", "und", "???", "???", "???", "sys"
10123};
9ee6e8bb 10124
0ecb72a5 10125void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10126 int flags)
2c0262af
FB
10127{
10128 int i;
b5ff1b31 10129 uint32_t psr;
2c0262af
FB
10130
10131    for (i = 0; i < 16; i++) {
7fe48483 10132 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10133 if ((i % 4) == 3)
7fe48483 10134 cpu_fprintf(f, "\n");
2c0262af 10135 else
7fe48483 10136 cpu_fprintf(f, " ");
2c0262af 10137 }
b5ff1b31 10138 psr = cpsr_read(env);
687fa640
TS
10139 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10140 psr,
b5ff1b31
FB
10141 psr & (1 << 31) ? 'N' : '-',
10142 psr & (1 << 30) ? 'Z' : '-',
10143 psr & (1 << 29) ? 'C' : '-',
10144 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10145 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10146 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10147
f2617cfc
PM
10148 if (flags & CPU_DUMP_FPU) {
10149 int numvfpregs = 0;
10150 if (arm_feature(env, ARM_FEATURE_VFP)) {
10151 numvfpregs += 16;
10152 }
10153 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10154 numvfpregs += 16;
10155 }
10156 for (i = 0; i < numvfpregs; i++) {
10157 uint64_t v = float64_val(env->vfp.regs[i]);
10158 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10159 i * 2, (uint32_t)v,
10160 i * 2 + 1, (uint32_t)(v >> 32),
10161 i, v);
10162 }
10163 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10164 }
2c0262af 10165}
a6b025d3 10166
0ecb72a5 10167void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10168{
25983cad 10169 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10170 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10171}