]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
target-arm: Implement sbc_cc inline
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
76cad711 28#include "disas/disas.h"
57fec1fe 29#include "tcg-op.h"
1de7afc9 30#include "qemu/log.h"
1497c961 31
7b59220e 32#include "helper.h"
1497c961 33#define GEN_HELPER 1
7b59220e 34#include "helper.h"
2c0262af 35
be5e7a76
DES
/* Architecture-feature convenience tests: each expands to a check of
   the corresponding ARM_FEATURE_* bit on the decoder's 'env'. */
#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

/* Jump to the illegal-instruction path when the required feature is absent. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)
2c0262af
FB
48/* internal defines */
49typedef struct DisasContext {
0fa85d43 50 target_ulong pc;
2c0262af 51 int is_jmp;
e50e6a20
FB
52 /* Nonzero if this instruction has been conditionally skipped. */
53 int condjmp;
54 /* The label that will be jumped to when the instruction is skipped. */
55 int condlabel;
b90372ad 56 /* Thumb-2 conditional execution bits. */
9ee6e8bb
PB
57 int condexec_mask;
58 int condexec_cond;
2c0262af 59 struct TranslationBlock *tb;
8aaca4c0 60 int singlestep_enabled;
5899f386 61 int thumb;
d8fd2954 62 int bswap_code;
b5ff1b31
FB
63#if !defined(CONFIG_USER_ONLY)
64 int user;
65#endif
5df8bac1 66 int vfp_enabled;
69d1fc22
PM
67 int vec_len;
68 int vec_stride;
2c0262af
FB
69} DisasContext;
70
e12ce78d
PM
71static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
72
b5ff1b31
FB
73#if defined(CONFIG_USER_ONLY)
74#define IS_USER(s) 1
75#else
76#define IS_USER(s) (s->user)
77#endif
78
9ee6e8bb 79/* These instructions trap after executing, so defer them until after the
b90372ad 80 conditional execution state has been updated. */
9ee6e8bb
PB
81#define DISAS_WFI 4
82#define DISAS_SWI 5
2c0262af 83
a7812ae4 84static TCGv_ptr cpu_env;
ad69471c 85/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 86static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 87static TCGv_i32 cpu_R[16];
66c374de 88static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
426f5abc
PB
89static TCGv_i32 cpu_exclusive_addr;
90static TCGv_i32 cpu_exclusive_val;
91static TCGv_i32 cpu_exclusive_high;
92#ifdef CONFIG_USER_ONLY
93static TCGv_i32 cpu_exclusive_test;
94static TCGv_i32 cpu_exclusive_info;
95#endif
ad69471c 96
b26eefb6 97/* FIXME: These should be removed. */
a7812ae4
PB
98static TCGv cpu_F0s, cpu_F1s;
99static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 100
022c62cb 101#include "exec/gen-icount.h"
2e70f6ef 102
155c3eac
FN
103static const char *regnames[] =
104 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
105 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
106
b26eefb6
PB
107/* initialize TCG globals. */
108void arm_translate_init(void)
109{
155c3eac
FN
110 int i;
111
a7812ae4
PB
112 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
113
155c3eac
FN
114 for (i = 0; i < 16; i++) {
115 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 116 offsetof(CPUARMState, regs[i]),
155c3eac
FN
117 regnames[i]);
118 }
66c374de
AJ
119 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
120 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
121 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
122 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
123
426f5abc 124 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 125 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
426f5abc 126 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 127 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 128 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 129 offsetof(CPUARMState, exclusive_high), "exclusive_high");
426f5abc
PB
130#ifdef CONFIG_USER_ONLY
131 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 132 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 133 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 134 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 135#endif
155c3eac 136
a7812ae4 137#define GEN_HELPER 2
7b59220e 138#include "helper.h"
b26eefb6
PB
139}
140
d9ba4830
PB
141static inline TCGv load_cpu_offset(int offset)
142{
7d1b0095 143 TCGv tmp = tcg_temp_new_i32();
d9ba4830
PB
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146}
147
0ecb72a5 148#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830
PB
149
150static inline void store_cpu_offset(TCGv var, int offset)
151{
152 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 153 tcg_temp_free_i32(var);
d9ba4830
PB
154}
155
156#define store_cpu_field(var, name) \
0ecb72a5 157 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 158
b26eefb6
PB
159/* Set a variable to the value of a CPU register. */
160static void load_reg_var(DisasContext *s, TCGv var, int reg)
161{
162 if (reg == 15) {
163 uint32_t addr;
b90372ad 164 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
165 if (s->thumb)
166 addr = (long)s->pc + 2;
167 else
168 addr = (long)s->pc + 4;
169 tcg_gen_movi_i32(var, addr);
170 } else {
155c3eac 171 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
172 }
173}
174
175/* Create a new temporary and set it to the value of a CPU register. */
176static inline TCGv load_reg(DisasContext *s, int reg)
177{
7d1b0095 178 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
179 load_reg_var(s, tmp, reg);
180 return tmp;
181}
182
183/* Set a CPU register. The source must be a temporary and will be
184 marked as dead. */
185static void store_reg(DisasContext *s, int reg, TCGv var)
186{
187 if (reg == 15) {
188 tcg_gen_andi_i32(var, var, ~1);
189 s->is_jmp = DISAS_JUMP;
190 }
155c3eac 191 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 192 tcg_temp_free_i32(var);
b26eefb6
PB
193}
194
b26eefb6 195/* Value extensions. */
86831435
PB
196#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
197#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
198#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
199#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
200
1497c961
PB
201#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
202#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e 203
b26eefb6 204
b75263d6
JR
205static inline void gen_set_cpsr(TCGv var, uint32_t mask)
206{
207 TCGv tmp_mask = tcg_const_i32(mask);
1ce94f81 208 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
209 tcg_temp_free_i32(tmp_mask);
210}
d9ba4830
PB
211/* Set NZCV flags from the high 4 bits of var. */
212#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
213
214static void gen_exception(int excp)
215{
7d1b0095 216 TCGv tmp = tcg_temp_new_i32();
d9ba4830 217 tcg_gen_movi_i32(tmp, excp);
1ce94f81 218 gen_helper_exception(cpu_env, tmp);
7d1b0095 219 tcg_temp_free_i32(tmp);
d9ba4830
PB
220}
221
3670669c
PB
222static void gen_smul_dual(TCGv a, TCGv b)
223{
7d1b0095
PM
224 TCGv tmp1 = tcg_temp_new_i32();
225 TCGv tmp2 = tcg_temp_new_i32();
22478e79
AZ
226 tcg_gen_ext16s_i32(tmp1, a);
227 tcg_gen_ext16s_i32(tmp2, b);
3670669c 228 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 229 tcg_temp_free_i32(tmp2);
3670669c
PB
230 tcg_gen_sari_i32(a, a, 16);
231 tcg_gen_sari_i32(b, b, 16);
232 tcg_gen_mul_i32(b, b, a);
233 tcg_gen_mov_i32(a, tmp1);
7d1b0095 234 tcg_temp_free_i32(tmp1);
3670669c
PB
235}
236
237/* Byteswap each halfword. */
238static void gen_rev16(TCGv var)
239{
7d1b0095 240 TCGv tmp = tcg_temp_new_i32();
3670669c
PB
241 tcg_gen_shri_i32(tmp, var, 8);
242 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
243 tcg_gen_shli_i32(var, var, 8);
244 tcg_gen_andi_i32(var, var, 0xff00ff00);
245 tcg_gen_or_i32(var, var, tmp);
7d1b0095 246 tcg_temp_free_i32(tmp);
3670669c
PB
247}
248
249/* Byteswap low halfword and sign extend. */
250static void gen_revsh(TCGv var)
251{
1a855029
AJ
252 tcg_gen_ext16u_i32(var, var);
253 tcg_gen_bswap16_i32(var, var);
254 tcg_gen_ext16s_i32(var, var);
3670669c
PB
255}
256
257/* Unsigned bitfield extract. */
258static void gen_ubfx(TCGv var, int shift, uint32_t mask)
259{
260 if (shift)
261 tcg_gen_shri_i32(var, var, shift);
262 tcg_gen_andi_i32(var, var, mask);
263}
264
265/* Signed bitfield extract. */
266static void gen_sbfx(TCGv var, int shift, int width)
267{
268 uint32_t signbit;
269
270 if (shift)
271 tcg_gen_sari_i32(var, var, shift);
272 if (shift + width < 32) {
273 signbit = 1u << (width - 1);
274 tcg_gen_andi_i32(var, var, (1u << width) - 1);
275 tcg_gen_xori_i32(var, var, signbit);
276 tcg_gen_subi_i32(var, var, signbit);
277 }
278}
279
838fa72d
AJ
280/* Return (b << 32) + a. Mark inputs as dead */
281static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
3670669c 282{
838fa72d
AJ
283 TCGv_i64 tmp64 = tcg_temp_new_i64();
284
285 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 286 tcg_temp_free_i32(b);
838fa72d
AJ
287 tcg_gen_shli_i64(tmp64, tmp64, 32);
288 tcg_gen_add_i64(a, tmp64, a);
289
290 tcg_temp_free_i64(tmp64);
291 return a;
292}
293
294/* Return (b << 32) - a. Mark inputs as dead. */
295static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
296{
297 TCGv_i64 tmp64 = tcg_temp_new_i64();
298
299 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 300 tcg_temp_free_i32(b);
838fa72d
AJ
301 tcg_gen_shli_i64(tmp64, tmp64, 32);
302 tcg_gen_sub_i64(a, tmp64, a);
303
304 tcg_temp_free_i64(tmp64);
305 return a;
3670669c
PB
306}
307
5e3f878a 308/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 309static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 310{
831d7fe8
RH
311 TCGv lo = tcg_temp_new_i32();
312 TCGv hi = tcg_temp_new_i32();
313 TCGv_i64 ret;
5e3f878a 314
831d7fe8 315 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 316 tcg_temp_free_i32(a);
7d1b0095 317 tcg_temp_free_i32(b);
831d7fe8
RH
318
319 ret = tcg_temp_new_i64();
320 tcg_gen_concat_i32_i64(ret, lo, hi);
321 tcg_temp_free(lo);
322 tcg_temp_free(hi);
323
324 return ret;
5e3f878a
PB
325}
326
a7812ae4 327static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
831d7fe8
RH
329 TCGv lo = tcg_temp_new_i32();
330 TCGv hi = tcg_temp_new_i32();
331 TCGv_i64 ret;
5e3f878a 332
831d7fe8 333 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 334 tcg_temp_free_i32(a);
7d1b0095 335 tcg_temp_free_i32(b);
831d7fe8
RH
336
337 ret = tcg_temp_new_i64();
338 tcg_gen_concat_i32_i64(ret, lo, hi);
339 tcg_temp_free(lo);
340 tcg_temp_free(hi);
341
342 return ret;
5e3f878a
PB
343}
344
8f01245e
PB
345/* Swap low and high halfwords. */
346static void gen_swap_half(TCGv var)
347{
7d1b0095 348 TCGv tmp = tcg_temp_new_i32();
8f01245e
PB
349 tcg_gen_shri_i32(tmp, var, 16);
350 tcg_gen_shli_i32(var, var, 16);
351 tcg_gen_or_i32(var, var, tmp);
7d1b0095 352 tcg_temp_free_i32(tmp);
8f01245e
PB
353}
354
b26eefb6
PB
355/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
356 tmp = (t0 ^ t1) & 0x8000;
357 t0 &= ~0x8000;
358 t1 &= ~0x8000;
359 t0 = (t0 + t1) ^ tmp;
360 */
361
362static void gen_add16(TCGv t0, TCGv t1)
363{
7d1b0095 364 TCGv tmp = tcg_temp_new_i32();
b26eefb6
PB
365 tcg_gen_xor_i32(tmp, t0, t1);
366 tcg_gen_andi_i32(tmp, tmp, 0x8000);
367 tcg_gen_andi_i32(t0, t0, ~0x8000);
368 tcg_gen_andi_i32(t1, t1, ~0x8000);
369 tcg_gen_add_i32(t0, t0, t1);
370 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
371 tcg_temp_free_i32(tmp);
372 tcg_temp_free_i32(t1);
b26eefb6
PB
373}
374
375/* Set CF to the top bit of var. */
376static void gen_set_CF_bit31(TCGv var)
377{
66c374de 378 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
379}
380
381/* Set N and Z flags from var. */
382static inline void gen_logic_CC(TCGv var)
383{
66c374de
AJ
384 tcg_gen_mov_i32(cpu_NF, var);
385 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
386}
387
388/* T0 += T1 + CF. */
396e467c 389static void gen_adc(TCGv t0, TCGv t1)
b26eefb6 390{
396e467c 391 tcg_gen_add_i32(t0, t0, t1);
66c374de 392 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
393}
394
e9bb4aa9
JR
395/* dest = T0 + T1 + CF. */
396static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
397{
e9bb4aa9 398 tcg_gen_add_i32(dest, t0, t1);
66c374de 399 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
400}
401
3670669c
PB
402/* dest = T0 - T1 + CF - 1. */
403static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
404{
3670669c 405 tcg_gen_sub_i32(dest, t0, t1);
66c374de 406 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 407 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
408}
409
72485ec4
AJ
410/* dest = T0 + T1. Compute C, N, V and Z flags */
411static void gen_add_CC(TCGv dest, TCGv t0, TCGv t1)
412{
e3482cb8
RH
413 TCGv tmp = tcg_temp_new_i32();
414 tcg_gen_movi_i32(tmp, 0);
415 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 416 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 417 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
418 tcg_gen_xor_i32(tmp, t0, t1);
419 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
420 tcg_temp_free_i32(tmp);
421 tcg_gen_mov_i32(dest, cpu_NF);
422}
423
49b4c31e
RH
424/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
425static void gen_adc_CC(TCGv dest, TCGv t0, TCGv t1)
426{
427 TCGv tmp = tcg_temp_new_i32();
428 if (TCG_TARGET_HAS_add2_i32) {
429 tcg_gen_movi_i32(tmp, 0);
430 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
431 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, cpu_CF, t1, tmp);
432 } else {
433 TCGv_i64 q0 = tcg_temp_new_i64();
434 TCGv_i64 q1 = tcg_temp_new_i64();
435 tcg_gen_extu_i32_i64(q0, t0);
436 tcg_gen_extu_i32_i64(q1, t1);
437 tcg_gen_add_i64(q0, q0, q1);
438 tcg_gen_extu_i32_i64(q1, cpu_CF);
439 tcg_gen_add_i64(q0, q0, q1);
440 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
441 tcg_temp_free_i64(q0);
442 tcg_temp_free_i64(q1);
443 }
444 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
445 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
446 tcg_gen_xor_i32(tmp, t0, t1);
447 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
448 tcg_temp_free_i32(tmp);
449 tcg_gen_mov_i32(dest, cpu_NF);
450}
451
72485ec4
AJ
452/* dest = T0 - T1. Compute C, N, V and Z flags */
453static void gen_sub_CC(TCGv dest, TCGv t0, TCGv t1)
454{
455 TCGv tmp;
456 tcg_gen_sub_i32(cpu_NF, t0, t1);
457 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
458 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
459 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
460 tmp = tcg_temp_new_i32();
461 tcg_gen_xor_i32(tmp, t0, t1);
462 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
463 tcg_temp_free_i32(tmp);
464 tcg_gen_mov_i32(dest, cpu_NF);
465}
466
2de68a49
RH
467/* dest = T0 + ~T1 + CF = T0 - T1 + CF - 1. Compute C, N, V and Z flags */
468static void gen_sbc_CC(TCGv dest, TCGv t0, TCGv t1)
469{
470 TCGv tmp = tcg_temp_new_i32();
471 tcg_gen_subi_i32(cpu_CF, cpu_CF, 1);
472 if (TCG_TARGET_HAS_add2_i32) {
473 tcg_gen_movi_i32(tmp, 0);
474 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
475 tcg_gen_sub2_i32(cpu_NF, cpu_CF, t0, cpu_CF, t1, tmp);
476 } else {
477 TCGv_i64 q0 = tcg_temp_new_i64();
478 TCGv_i64 q1 = tcg_temp_new_i64();
479 tcg_gen_extu_i32_i64(q0, t0);
480 tcg_gen_extu_i32_i64(q1, t1);
481 tcg_gen_sub_i64(q0, q0, q1);
482 tcg_gen_extu_i32_i64(q1, cpu_CF);
483 tcg_gen_add_i64(q0, q0, q1);
484 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
485 tcg_temp_free_i64(q0);
486 tcg_temp_free_i64(q1);
487 }
488 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
489 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
490 tcg_gen_xor_i32(tmp, t0, t1);
491 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
492 tcg_temp_free_i32(tmp);
493 tcg_gen_mov_i32(dest, cpu_NF);
494}
495
365af80e
AJ
496#define GEN_SHIFT(name) \
497static void gen_##name(TCGv dest, TCGv t0, TCGv t1) \
498{ \
499 TCGv tmp1, tmp2, tmp3; \
500 tmp1 = tcg_temp_new_i32(); \
501 tcg_gen_andi_i32(tmp1, t1, 0xff); \
502 tmp2 = tcg_const_i32(0); \
503 tmp3 = tcg_const_i32(0x1f); \
504 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
505 tcg_temp_free_i32(tmp3); \
506 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
507 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
508 tcg_temp_free_i32(tmp2); \
509 tcg_temp_free_i32(tmp1); \
510}
511GEN_SHIFT(shl)
512GEN_SHIFT(shr)
513#undef GEN_SHIFT
514
515static void gen_sar(TCGv dest, TCGv t0, TCGv t1)
516{
517 TCGv tmp1, tmp2;
518 tmp1 = tcg_temp_new_i32();
519 tcg_gen_andi_i32(tmp1, t1, 0xff);
520 tmp2 = tcg_const_i32(0x1f);
521 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
522 tcg_temp_free_i32(tmp2);
523 tcg_gen_sar_i32(dest, t0, tmp1);
524 tcg_temp_free_i32(tmp1);
525}
526
36c91fd1
PM
527static void tcg_gen_abs_i32(TCGv dest, TCGv src)
528{
529 TCGv c0 = tcg_const_i32(0);
530 TCGv tmp = tcg_temp_new_i32();
531 tcg_gen_neg_i32(tmp, src);
532 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
533 tcg_temp_free_i32(c0);
534 tcg_temp_free_i32(tmp);
535}
ad69471c 536
9a119ff6 537static void shifter_out_im(TCGv var, int shift)
b26eefb6 538{
9a119ff6 539 if (shift == 0) {
66c374de 540 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 541 } else {
66c374de
AJ
542 tcg_gen_shri_i32(cpu_CF, var, shift);
543 if (shift != 31) {
544 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
545 }
9a119ff6 546 }
9a119ff6 547}
b26eefb6 548
9a119ff6
PB
549/* Shift by immediate. Includes special handling for shift == 0. */
550static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
551{
552 switch (shiftop) {
553 case 0: /* LSL */
554 if (shift != 0) {
555 if (flags)
556 shifter_out_im(var, 32 - shift);
557 tcg_gen_shli_i32(var, var, shift);
558 }
559 break;
560 case 1: /* LSR */
561 if (shift == 0) {
562 if (flags) {
66c374de 563 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
564 }
565 tcg_gen_movi_i32(var, 0);
566 } else {
567 if (flags)
568 shifter_out_im(var, shift - 1);
569 tcg_gen_shri_i32(var, var, shift);
570 }
571 break;
572 case 2: /* ASR */
573 if (shift == 0)
574 shift = 32;
575 if (flags)
576 shifter_out_im(var, shift - 1);
577 if (shift == 32)
578 shift = 31;
579 tcg_gen_sari_i32(var, var, shift);
580 break;
581 case 3: /* ROR/RRX */
582 if (shift != 0) {
583 if (flags)
584 shifter_out_im(var, shift - 1);
f669df27 585 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 586 } else {
66c374de 587 TCGv tmp = tcg_temp_new_i32();
b6348f29 588 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
589 if (flags)
590 shifter_out_im(var, 0);
591 tcg_gen_shri_i32(var, var, 1);
b26eefb6 592 tcg_gen_or_i32(var, var, tmp);
7d1b0095 593 tcg_temp_free_i32(tmp);
b26eefb6
PB
594 }
595 }
596};
597
8984bd2e
PB
598static inline void gen_arm_shift_reg(TCGv var, int shiftop,
599 TCGv shift, int flags)
600{
601 if (flags) {
602 switch (shiftop) {
9ef39277
BS
603 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
604 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
605 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
606 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
607 }
608 } else {
609 switch (shiftop) {
365af80e
AJ
610 case 0:
611 gen_shl(var, var, shift);
612 break;
613 case 1:
614 gen_shr(var, var, shift);
615 break;
616 case 2:
617 gen_sar(var, var, shift);
618 break;
f669df27
AJ
619 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
620 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
621 }
622 }
7d1b0095 623 tcg_temp_free_i32(shift);
8984bd2e
PB
624}
625
6ddbc6e4
PB
626#define PAS_OP(pfx) \
627 switch (op2) { \
628 case 0: gen_pas_helper(glue(pfx,add16)); break; \
629 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
630 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
631 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
632 case 4: gen_pas_helper(glue(pfx,add8)); break; \
633 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
634 }
d9ba4830 635static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 636{
a7812ae4 637 TCGv_ptr tmp;
6ddbc6e4
PB
638
639 switch (op1) {
640#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
641 case 1:
a7812ae4 642 tmp = tcg_temp_new_ptr();
0ecb72a5 643 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 644 PAS_OP(s)
b75263d6 645 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
646 break;
647 case 5:
a7812ae4 648 tmp = tcg_temp_new_ptr();
0ecb72a5 649 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 650 PAS_OP(u)
b75263d6 651 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
652 break;
653#undef gen_pas_helper
654#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
655 case 2:
656 PAS_OP(q);
657 break;
658 case 3:
659 PAS_OP(sh);
660 break;
661 case 6:
662 PAS_OP(uq);
663 break;
664 case 7:
665 PAS_OP(uh);
666 break;
667#undef gen_pas_helper
668 }
669}
9ee6e8bb
PB
670#undef PAS_OP
671
6ddbc6e4
PB
672/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
673#define PAS_OP(pfx) \
ed89a2f1 674 switch (op1) { \
6ddbc6e4
PB
675 case 0: gen_pas_helper(glue(pfx,add8)); break; \
676 case 1: gen_pas_helper(glue(pfx,add16)); break; \
677 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
678 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
679 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
680 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
681 }
d9ba4830 682static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 683{
a7812ae4 684 TCGv_ptr tmp;
6ddbc6e4 685
ed89a2f1 686 switch (op2) {
6ddbc6e4
PB
687#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
688 case 0:
a7812ae4 689 tmp = tcg_temp_new_ptr();
0ecb72a5 690 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 691 PAS_OP(s)
b75263d6 692 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
693 break;
694 case 4:
a7812ae4 695 tmp = tcg_temp_new_ptr();
0ecb72a5 696 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 697 PAS_OP(u)
b75263d6 698 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
699 break;
700#undef gen_pas_helper
701#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
702 case 1:
703 PAS_OP(q);
704 break;
705 case 2:
706 PAS_OP(sh);
707 break;
708 case 5:
709 PAS_OP(uq);
710 break;
711 case 6:
712 PAS_OP(uh);
713 break;
714#undef gen_pas_helper
715 }
716}
9ee6e8bb
PB
717#undef PAS_OP
718
d9ba4830
PB
719static void gen_test_cc(int cc, int label)
720{
721 TCGv tmp;
d9ba4830
PB
722 int inv;
723
d9ba4830
PB
724 switch (cc) {
725 case 0: /* eq: Z */
66c374de 726 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
727 break;
728 case 1: /* ne: !Z */
66c374de 729 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
730 break;
731 case 2: /* cs: C */
66c374de 732 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
733 break;
734 case 3: /* cc: !C */
66c374de 735 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
736 break;
737 case 4: /* mi: N */
66c374de 738 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
739 break;
740 case 5: /* pl: !N */
66c374de 741 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
742 break;
743 case 6: /* vs: V */
66c374de 744 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
745 break;
746 case 7: /* vc: !V */
66c374de 747 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
748 break;
749 case 8: /* hi: C && !Z */
750 inv = gen_new_label();
66c374de
AJ
751 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
752 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
753 gen_set_label(inv);
754 break;
755 case 9: /* ls: !C || Z */
66c374de
AJ
756 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
757 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
758 break;
759 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
760 tmp = tcg_temp_new_i32();
761 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 762 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 763 tcg_temp_free_i32(tmp);
d9ba4830
PB
764 break;
765 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
766 tmp = tcg_temp_new_i32();
767 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 768 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 769 tcg_temp_free_i32(tmp);
d9ba4830
PB
770 break;
771 case 12: /* gt: !Z && N == V */
772 inv = gen_new_label();
66c374de
AJ
773 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
774 tmp = tcg_temp_new_i32();
775 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 776 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 777 tcg_temp_free_i32(tmp);
d9ba4830
PB
778 gen_set_label(inv);
779 break;
780 case 13: /* le: Z || N != V */
66c374de
AJ
781 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
782 tmp = tcg_temp_new_i32();
783 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 784 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 785 tcg_temp_free_i32(tmp);
d9ba4830
PB
786 break;
787 default:
788 fprintf(stderr, "Bad condition code 0x%x\n", cc);
789 abort();
790 }
d9ba4830 791}
2c0262af 792
/* Per data-processing opcode: 1 if flags come from gen_logic_CC (N/Z
   from the result), 0 if the arithmetic flag helpers are used. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
d9ba4830
PB
812/* Set PC and Thumb state from an immediate address. */
813static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 814{
b26eefb6 815 TCGv tmp;
99c475ab 816
b26eefb6 817 s->is_jmp = DISAS_UPDATE;
d9ba4830 818 if (s->thumb != (addr & 1)) {
7d1b0095 819 tmp = tcg_temp_new_i32();
d9ba4830 820 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 821 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 822 tcg_temp_free_i32(tmp);
d9ba4830 823 }
155c3eac 824 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
825}
826
827/* Set PC and Thumb state from var. var is marked as dead. */
828static inline void gen_bx(DisasContext *s, TCGv var)
829{
d9ba4830 830 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
831 tcg_gen_andi_i32(cpu_R[15], var, ~1);
832 tcg_gen_andi_i32(var, var, 1);
833 store_cpu_field(var, thumb);
d9ba4830
PB
834}
835
21aeb343
JR
836/* Variant of store_reg which uses branch&exchange logic when storing
837 to r15 in ARM architecture v7 and above. The source must be a temporary
838 and will be marked as dead. */
0ecb72a5 839static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
21aeb343
JR
840 int reg, TCGv var)
841{
842 if (reg == 15 && ENABLE_ARCH_7) {
843 gen_bx(s, var);
844 } else {
845 store_reg(s, reg, var);
846 }
847}
848
be5e7a76
DES
849/* Variant of store_reg which uses branch&exchange logic when storing
850 * to r15 in ARM architecture v5T and above. This is used for storing
851 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
852 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
0ecb72a5 853static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
be5e7a76
DES
854 int reg, TCGv var)
855{
856 if (reg == 15 && ENABLE_ARCH_5) {
857 gen_bx(s, var);
858 } else {
859 store_reg(s, reg, var);
860 }
861}
862
b0109805
PB
863static inline TCGv gen_ld8s(TCGv addr, int index)
864{
7d1b0095 865 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
866 tcg_gen_qemu_ld8s(tmp, addr, index);
867 return tmp;
868}
869static inline TCGv gen_ld8u(TCGv addr, int index)
870{
7d1b0095 871 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
872 tcg_gen_qemu_ld8u(tmp, addr, index);
873 return tmp;
874}
875static inline TCGv gen_ld16s(TCGv addr, int index)
876{
7d1b0095 877 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
878 tcg_gen_qemu_ld16s(tmp, addr, index);
879 return tmp;
880}
881static inline TCGv gen_ld16u(TCGv addr, int index)
882{
7d1b0095 883 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
884 tcg_gen_qemu_ld16u(tmp, addr, index);
885 return tmp;
886}
887static inline TCGv gen_ld32(TCGv addr, int index)
888{
7d1b0095 889 TCGv tmp = tcg_temp_new_i32();
b0109805
PB
890 tcg_gen_qemu_ld32u(tmp, addr, index);
891 return tmp;
892}
84496233
JR
893static inline TCGv_i64 gen_ld64(TCGv addr, int index)
894{
895 TCGv_i64 tmp = tcg_temp_new_i64();
896 tcg_gen_qemu_ld64(tmp, addr, index);
897 return tmp;
898}
b0109805
PB
899static inline void gen_st8(TCGv val, TCGv addr, int index)
900{
901 tcg_gen_qemu_st8(val, addr, index);
7d1b0095 902 tcg_temp_free_i32(val);
b0109805
PB
903}
904static inline void gen_st16(TCGv val, TCGv addr, int index)
905{
906 tcg_gen_qemu_st16(val, addr, index);
7d1b0095 907 tcg_temp_free_i32(val);
b0109805
PB
908}
909static inline void gen_st32(TCGv val, TCGv addr, int index)
910{
911 tcg_gen_qemu_st32(val, addr, index);
7d1b0095 912 tcg_temp_free_i32(val);
b0109805 913}
84496233
JR
914static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
915{
916 tcg_gen_qemu_st64(val, addr, index);
917 tcg_temp_free_i64(val);
918}
b5ff1b31 919
5e3f878a
PB
920static inline void gen_set_pc_im(uint32_t val)
921{
155c3eac 922 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
923}
924
b5ff1b31
FB
925/* Force a TB lookup after an instruction that changes the CPU state. */
926static inline void gen_lookup_tb(DisasContext *s)
927{
a6445c52 928 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
929 s->is_jmp = DISAS_UPDATE;
930}
931
b0109805
PB
932static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
933 TCGv var)
2c0262af 934{
1e8d4eec 935 int val, rm, shift, shiftop;
b26eefb6 936 TCGv offset;
2c0262af
FB
937
938 if (!(insn & (1 << 25))) {
939 /* immediate */
940 val = insn & 0xfff;
941 if (!(insn & (1 << 23)))
942 val = -val;
537730b9 943 if (val != 0)
b0109805 944 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
945 } else {
946 /* shift/register */
947 rm = (insn) & 0xf;
948 shift = (insn >> 7) & 0x1f;
1e8d4eec 949 shiftop = (insn >> 5) & 3;
b26eefb6 950 offset = load_reg(s, rm);
9a119ff6 951 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 952 if (!(insn & (1 << 23)))
b0109805 953 tcg_gen_sub_i32(var, var, offset);
2c0262af 954 else
b0109805 955 tcg_gen_add_i32(var, var, offset);
7d1b0095 956 tcg_temp_free_i32(offset);
2c0262af
FB
957 }
958}
959
191f9a93 960static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 961 int extra, TCGv var)
2c0262af
FB
962{
963 int val, rm;
b26eefb6 964 TCGv offset;
3b46e624 965
2c0262af
FB
966 if (insn & (1 << 22)) {
967 /* immediate */
968 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
969 if (!(insn & (1 << 23)))
970 val = -val;
18acad92 971 val += extra;
537730b9 972 if (val != 0)
b0109805 973 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
974 } else {
975 /* register */
191f9a93 976 if (extra)
b0109805 977 tcg_gen_addi_i32(var, var, extra);
2c0262af 978 rm = (insn) & 0xf;
b26eefb6 979 offset = load_reg(s, rm);
2c0262af 980 if (!(insn & (1 << 23)))
b0109805 981 tcg_gen_sub_i32(var, var, offset);
2c0262af 982 else
b0109805 983 tcg_gen_add_i32(var, var, offset);
7d1b0095 984 tcg_temp_free_i32(offset);
2c0262af
FB
985 }
986}
987
5aaebd13
PM
988static TCGv_ptr get_fpstatus_ptr(int neon)
989{
990 TCGv_ptr statusptr = tcg_temp_new_ptr();
991 int offset;
992 if (neon) {
0ecb72a5 993 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 994 } else {
0ecb72a5 995 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
996 }
997 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
998 return statusptr;
999}
1000
4373f3ce
PB
1001#define VFP_OP2(name) \
1002static inline void gen_vfp_##name(int dp) \
1003{ \
ae1857ec
PM
1004 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1005 if (dp) { \
1006 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1007 } else { \
1008 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1009 } \
1010 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1011}
1012
4373f3ce
PB
1013VFP_OP2(add)
1014VFP_OP2(sub)
1015VFP_OP2(mul)
1016VFP_OP2(div)
1017
1018#undef VFP_OP2
1019
605a6aed
PM
1020static inline void gen_vfp_F1_mul(int dp)
1021{
1022 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1023 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1024 if (dp) {
ae1857ec 1025 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1026 } else {
ae1857ec 1027 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1028 }
ae1857ec 1029 tcg_temp_free_ptr(fpst);
605a6aed
PM
1030}
1031
1032static inline void gen_vfp_F1_neg(int dp)
1033{
1034 /* Like gen_vfp_neg() but put result in F1 */
1035 if (dp) {
1036 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1037 } else {
1038 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1039 }
1040}
1041
4373f3ce
PB
1042static inline void gen_vfp_abs(int dp)
1043{
1044 if (dp)
1045 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1046 else
1047 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1048}
1049
1050static inline void gen_vfp_neg(int dp)
1051{
1052 if (dp)
1053 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1054 else
1055 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1056}
1057
1058static inline void gen_vfp_sqrt(int dp)
1059{
1060 if (dp)
1061 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1062 else
1063 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1064}
1065
1066static inline void gen_vfp_cmp(int dp)
1067{
1068 if (dp)
1069 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1070 else
1071 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1072}
1073
1074static inline void gen_vfp_cmpe(int dp)
1075{
1076 if (dp)
1077 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1078 else
1079 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1080}
1081
1082static inline void gen_vfp_F1_ld0(int dp)
1083{
1084 if (dp)
5b340b51 1085 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1086 else
5b340b51 1087 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1088}
1089
5500b06c
PM
1090#define VFP_GEN_ITOF(name) \
1091static inline void gen_vfp_##name(int dp, int neon) \
1092{ \
5aaebd13 1093 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1094 if (dp) { \
1095 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1096 } else { \
1097 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1098 } \
b7fa9214 1099 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1100}
1101
5500b06c
PM
1102VFP_GEN_ITOF(uito)
1103VFP_GEN_ITOF(sito)
1104#undef VFP_GEN_ITOF
4373f3ce 1105
5500b06c
PM
1106#define VFP_GEN_FTOI(name) \
1107static inline void gen_vfp_##name(int dp, int neon) \
1108{ \
5aaebd13 1109 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1110 if (dp) { \
1111 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1112 } else { \
1113 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1114 } \
b7fa9214 1115 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1116}
1117
5500b06c
PM
1118VFP_GEN_FTOI(toui)
1119VFP_GEN_FTOI(touiz)
1120VFP_GEN_FTOI(tosi)
1121VFP_GEN_FTOI(tosiz)
1122#undef VFP_GEN_FTOI
4373f3ce
PB
1123
1124#define VFP_GEN_FIX(name) \
5500b06c 1125static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1126{ \
b75263d6 1127 TCGv tmp_shift = tcg_const_i32(shift); \
5aaebd13 1128 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1129 if (dp) { \
1130 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1131 } else { \
1132 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1133 } \
b75263d6 1134 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1135 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1136}
4373f3ce
PB
1137VFP_GEN_FIX(tosh)
1138VFP_GEN_FIX(tosl)
1139VFP_GEN_FIX(touh)
1140VFP_GEN_FIX(toul)
1141VFP_GEN_FIX(shto)
1142VFP_GEN_FIX(slto)
1143VFP_GEN_FIX(uhto)
1144VFP_GEN_FIX(ulto)
1145#undef VFP_GEN_FIX
9ee6e8bb 1146
312eea9f 1147static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1148{
1149 if (dp)
312eea9f 1150 tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1151 else
312eea9f 1152 tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1153}
1154
312eea9f 1155static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
b5ff1b31
FB
1156{
1157 if (dp)
312eea9f 1158 tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
b5ff1b31 1159 else
312eea9f 1160 tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
b5ff1b31
FB
1161}
1162
8e96005d
FB
1163static inline long
1164vfp_reg_offset (int dp, int reg)
1165{
1166 if (dp)
1167 return offsetof(CPUARMState, vfp.regs[reg]);
1168 else if (reg & 1) {
1169 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1170 + offsetof(CPU_DoubleU, l.upper);
1171 } else {
1172 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1173 + offsetof(CPU_DoubleU, l.lower);
1174 }
1175}
9ee6e8bb
PB
1176
1177/* Return the offset of a 32-bit piece of a NEON register.
1178 zero is the least significant end of the register. */
1179static inline long
1180neon_reg_offset (int reg, int n)
1181{
1182 int sreg;
1183 sreg = reg * 2 + n;
1184 return vfp_reg_offset(0, sreg);
1185}
1186
8f8e3aa4
PB
1187static TCGv neon_load_reg(int reg, int pass)
1188{
7d1b0095 1189 TCGv tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1190 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1191 return tmp;
1192}
1193
1194static void neon_store_reg(int reg, int pass, TCGv var)
1195{
1196 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1197 tcg_temp_free_i32(var);
8f8e3aa4
PB
1198}
1199
a7812ae4 1200static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1201{
1202 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1203}
1204
a7812ae4 1205static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1206{
1207 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1208}
1209
4373f3ce
PB
1210#define tcg_gen_ld_f32 tcg_gen_ld_i32
1211#define tcg_gen_ld_f64 tcg_gen_ld_i64
1212#define tcg_gen_st_f32 tcg_gen_st_i32
1213#define tcg_gen_st_f64 tcg_gen_st_i64
1214
b7bcbe95
FB
1215static inline void gen_mov_F0_vreg(int dp, int reg)
1216{
1217 if (dp)
4373f3ce 1218 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1219 else
4373f3ce 1220 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1221}
1222
1223static inline void gen_mov_F1_vreg(int dp, int reg)
1224{
1225 if (dp)
4373f3ce 1226 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1227 else
4373f3ce 1228 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1229}
1230
1231static inline void gen_mov_vreg_F0(int dp, int reg)
1232{
1233 if (dp)
4373f3ce 1234 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1235 else
4373f3ce 1236 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1237}
1238
18c9b560
AZ
1239#define ARM_CP_RW_BIT (1 << 20)
1240
a7812ae4 1241static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1242{
0ecb72a5 1243 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1244}
1245
a7812ae4 1246static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1247{
0ecb72a5 1248 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1249}
1250
da6b5335 1251static inline TCGv iwmmxt_load_creg(int reg)
e677137d 1252{
7d1b0095 1253 TCGv var = tcg_temp_new_i32();
0ecb72a5 1254 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1255 return var;
e677137d
PB
1256}
1257
da6b5335 1258static inline void iwmmxt_store_creg(int reg, TCGv var)
e677137d 1259{
0ecb72a5 1260 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1261 tcg_temp_free_i32(var);
e677137d
PB
1262}
1263
1264static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1265{
1266 iwmmxt_store_reg(cpu_M0, rn);
1267}
1268
1269static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1270{
1271 iwmmxt_load_reg(cpu_M0, rn);
1272}
1273
1274static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1275{
1276 iwmmxt_load_reg(cpu_V1, rn);
1277 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1278}
1279
1280static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1281{
1282 iwmmxt_load_reg(cpu_V1, rn);
1283 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1284}
1285
1286static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1287{
1288 iwmmxt_load_reg(cpu_V1, rn);
1289 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1290}
1291
1292#define IWMMXT_OP(name) \
1293static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1294{ \
1295 iwmmxt_load_reg(cpu_V1, rn); \
1296 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1297}
1298
477955bd
PM
1299#define IWMMXT_OP_ENV(name) \
1300static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1301{ \
1302 iwmmxt_load_reg(cpu_V1, rn); \
1303 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1304}
1305
1306#define IWMMXT_OP_ENV_SIZE(name) \
1307IWMMXT_OP_ENV(name##b) \
1308IWMMXT_OP_ENV(name##w) \
1309IWMMXT_OP_ENV(name##l)
e677137d 1310
477955bd 1311#define IWMMXT_OP_ENV1(name) \
e677137d
PB
1312static inline void gen_op_iwmmxt_##name##_M0(void) \
1313{ \
477955bd 1314 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
e677137d
PB
1315}
1316
1317IWMMXT_OP(maddsq)
1318IWMMXT_OP(madduq)
1319IWMMXT_OP(sadb)
1320IWMMXT_OP(sadw)
1321IWMMXT_OP(mulslw)
1322IWMMXT_OP(mulshw)
1323IWMMXT_OP(mululw)
1324IWMMXT_OP(muluhw)
1325IWMMXT_OP(macsw)
1326IWMMXT_OP(macuw)
1327
477955bd
PM
1328IWMMXT_OP_ENV_SIZE(unpackl)
1329IWMMXT_OP_ENV_SIZE(unpackh)
1330
1331IWMMXT_OP_ENV1(unpacklub)
1332IWMMXT_OP_ENV1(unpackluw)
1333IWMMXT_OP_ENV1(unpacklul)
1334IWMMXT_OP_ENV1(unpackhub)
1335IWMMXT_OP_ENV1(unpackhuw)
1336IWMMXT_OP_ENV1(unpackhul)
1337IWMMXT_OP_ENV1(unpacklsb)
1338IWMMXT_OP_ENV1(unpacklsw)
1339IWMMXT_OP_ENV1(unpacklsl)
1340IWMMXT_OP_ENV1(unpackhsb)
1341IWMMXT_OP_ENV1(unpackhsw)
1342IWMMXT_OP_ENV1(unpackhsl)
1343
1344IWMMXT_OP_ENV_SIZE(cmpeq)
1345IWMMXT_OP_ENV_SIZE(cmpgtu)
1346IWMMXT_OP_ENV_SIZE(cmpgts)
1347
1348IWMMXT_OP_ENV_SIZE(mins)
1349IWMMXT_OP_ENV_SIZE(minu)
1350IWMMXT_OP_ENV_SIZE(maxs)
1351IWMMXT_OP_ENV_SIZE(maxu)
1352
1353IWMMXT_OP_ENV_SIZE(subn)
1354IWMMXT_OP_ENV_SIZE(addn)
1355IWMMXT_OP_ENV_SIZE(subu)
1356IWMMXT_OP_ENV_SIZE(addu)
1357IWMMXT_OP_ENV_SIZE(subs)
1358IWMMXT_OP_ENV_SIZE(adds)
1359
1360IWMMXT_OP_ENV(avgb0)
1361IWMMXT_OP_ENV(avgb1)
1362IWMMXT_OP_ENV(avgw0)
1363IWMMXT_OP_ENV(avgw1)
e677137d
PB
1364
1365IWMMXT_OP(msadb)
1366
477955bd
PM
1367IWMMXT_OP_ENV(packuw)
1368IWMMXT_OP_ENV(packul)
1369IWMMXT_OP_ENV(packuq)
1370IWMMXT_OP_ENV(packsw)
1371IWMMXT_OP_ENV(packsl)
1372IWMMXT_OP_ENV(packsq)
e677137d 1373
e677137d
PB
1374static void gen_op_iwmmxt_set_mup(void)
1375{
1376 TCGv tmp;
1377 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1378 tcg_gen_ori_i32(tmp, tmp, 2);
1379 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1380}
1381
1382static void gen_op_iwmmxt_set_cup(void)
1383{
1384 TCGv tmp;
1385 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1386 tcg_gen_ori_i32(tmp, tmp, 1);
1387 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1388}
1389
1390static void gen_op_iwmmxt_setpsr_nz(void)
1391{
7d1b0095 1392 TCGv tmp = tcg_temp_new_i32();
e677137d
PB
1393 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1394 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1395}
1396
1397static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1398{
1399 iwmmxt_load_reg(cpu_V1, rn);
86831435 1400 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1401 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1402}
1403
da6b5335 1404static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
18c9b560
AZ
1405{
1406 int rd;
1407 uint32_t offset;
da6b5335 1408 TCGv tmp;
18c9b560
AZ
1409
1410 rd = (insn >> 16) & 0xf;
da6b5335 1411 tmp = load_reg(s, rd);
18c9b560
AZ
1412
1413 offset = (insn & 0xff) << ((insn >> 7) & 2);
1414 if (insn & (1 << 24)) {
1415 /* Pre indexed */
1416 if (insn & (1 << 23))
da6b5335 1417 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1418 else
da6b5335
FN
1419 tcg_gen_addi_i32(tmp, tmp, -offset);
1420 tcg_gen_mov_i32(dest, tmp);
18c9b560 1421 if (insn & (1 << 21))
da6b5335
FN
1422 store_reg(s, rd, tmp);
1423 else
7d1b0095 1424 tcg_temp_free_i32(tmp);
18c9b560
AZ
1425 } else if (insn & (1 << 21)) {
1426 /* Post indexed */
da6b5335 1427 tcg_gen_mov_i32(dest, tmp);
18c9b560 1428 if (insn & (1 << 23))
da6b5335 1429 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1430 else
da6b5335
FN
1431 tcg_gen_addi_i32(tmp, tmp, -offset);
1432 store_reg(s, rd, tmp);
18c9b560
AZ
1433 } else if (!(insn & (1 << 23)))
1434 return 1;
1435 return 0;
1436}
1437
da6b5335 1438static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
18c9b560
AZ
1439{
1440 int rd = (insn >> 0) & 0xf;
da6b5335 1441 TCGv tmp;
18c9b560 1442
da6b5335
FN
1443 if (insn & (1 << 8)) {
1444 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1445 return 1;
da6b5335
FN
1446 } else {
1447 tmp = iwmmxt_load_creg(rd);
1448 }
1449 } else {
7d1b0095 1450 tmp = tcg_temp_new_i32();
da6b5335
FN
1451 iwmmxt_load_reg(cpu_V0, rd);
1452 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1453 }
1454 tcg_gen_andi_i32(tmp, tmp, mask);
1455 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1456 tcg_temp_free_i32(tmp);
18c9b560
AZ
1457 return 0;
1458}
1459
a1c7273b 1460/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1461 (ie. an undefined instruction). */
0ecb72a5 1462static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
1463{
1464 int rd, wrd;
1465 int rdhi, rdlo, rd0, rd1, i;
da6b5335
FN
1466 TCGv addr;
1467 TCGv tmp, tmp2, tmp3;
18c9b560
AZ
1468
1469 if ((insn & 0x0e000e00) == 0x0c000000) {
1470 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1471 wrd = insn & 0xf;
1472 rdlo = (insn >> 12) & 0xf;
1473 rdhi = (insn >> 16) & 0xf;
1474 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1475 iwmmxt_load_reg(cpu_V0, wrd);
1476 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1477 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1478 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1479 } else { /* TMCRR */
da6b5335
FN
1480 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1481 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1482 gen_op_iwmmxt_set_mup();
1483 }
1484 return 0;
1485 }
1486
1487 wrd = (insn >> 12) & 0xf;
7d1b0095 1488 addr = tcg_temp_new_i32();
da6b5335 1489 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1490 tcg_temp_free_i32(addr);
18c9b560 1491 return 1;
da6b5335 1492 }
18c9b560
AZ
1493 if (insn & ARM_CP_RW_BIT) {
1494 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1495 tmp = tcg_temp_new_i32();
da6b5335
FN
1496 tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
1497 iwmmxt_store_creg(wrd, tmp);
18c9b560 1498 } else {
e677137d
PB
1499 i = 1;
1500 if (insn & (1 << 8)) {
1501 if (insn & (1 << 22)) { /* WLDRD */
da6b5335 1502 tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1503 i = 0;
1504 } else { /* WLDRW wRd */
da6b5335 1505 tmp = gen_ld32(addr, IS_USER(s));
e677137d
PB
1506 }
1507 } else {
1508 if (insn & (1 << 22)) { /* WLDRH */
da6b5335 1509 tmp = gen_ld16u(addr, IS_USER(s));
e677137d 1510 } else { /* WLDRB */
da6b5335 1511 tmp = gen_ld8u(addr, IS_USER(s));
e677137d
PB
1512 }
1513 }
1514 if (i) {
1515 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1516 tcg_temp_free_i32(tmp);
e677137d 1517 }
18c9b560
AZ
1518 gen_op_iwmmxt_movq_wRn_M0(wrd);
1519 }
1520 } else {
1521 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335
FN
1522 tmp = iwmmxt_load_creg(wrd);
1523 gen_st32(tmp, addr, IS_USER(s));
18c9b560
AZ
1524 } else {
1525 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1526 tmp = tcg_temp_new_i32();
e677137d
PB
1527 if (insn & (1 << 8)) {
1528 if (insn & (1 << 22)) { /* WSTRD */
7d1b0095 1529 tcg_temp_free_i32(tmp);
da6b5335 1530 tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
e677137d
PB
1531 } else { /* WSTRW wRd */
1532 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1533 gen_st32(tmp, addr, IS_USER(s));
e677137d
PB
1534 }
1535 } else {
1536 if (insn & (1 << 22)) { /* WSTRH */
1537 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1538 gen_st16(tmp, addr, IS_USER(s));
e677137d
PB
1539 } else { /* WSTRB */
1540 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
da6b5335 1541 gen_st8(tmp, addr, IS_USER(s));
e677137d
PB
1542 }
1543 }
18c9b560
AZ
1544 }
1545 }
7d1b0095 1546 tcg_temp_free_i32(addr);
18c9b560
AZ
1547 return 0;
1548 }
1549
1550 if ((insn & 0x0f000000) != 0x0e000000)
1551 return 1;
1552
1553 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1554 case 0x000: /* WOR */
1555 wrd = (insn >> 12) & 0xf;
1556 rd0 = (insn >> 0) & 0xf;
1557 rd1 = (insn >> 16) & 0xf;
1558 gen_op_iwmmxt_movq_M0_wRn(rd0);
1559 gen_op_iwmmxt_orq_M0_wRn(rd1);
1560 gen_op_iwmmxt_setpsr_nz();
1561 gen_op_iwmmxt_movq_wRn_M0(wrd);
1562 gen_op_iwmmxt_set_mup();
1563 gen_op_iwmmxt_set_cup();
1564 break;
1565 case 0x011: /* TMCR */
1566 if (insn & 0xf)
1567 return 1;
1568 rd = (insn >> 12) & 0xf;
1569 wrd = (insn >> 16) & 0xf;
1570 switch (wrd) {
1571 case ARM_IWMMXT_wCID:
1572 case ARM_IWMMXT_wCASF:
1573 break;
1574 case ARM_IWMMXT_wCon:
1575 gen_op_iwmmxt_set_cup();
1576 /* Fall through. */
1577 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1578 tmp = iwmmxt_load_creg(wrd);
1579 tmp2 = load_reg(s, rd);
f669df27 1580 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1581 tcg_temp_free_i32(tmp2);
da6b5335 1582 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1583 break;
1584 case ARM_IWMMXT_wCGR0:
1585 case ARM_IWMMXT_wCGR1:
1586 case ARM_IWMMXT_wCGR2:
1587 case ARM_IWMMXT_wCGR3:
1588 gen_op_iwmmxt_set_cup();
da6b5335
FN
1589 tmp = load_reg(s, rd);
1590 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1591 break;
1592 default:
1593 return 1;
1594 }
1595 break;
1596 case 0x100: /* WXOR */
1597 wrd = (insn >> 12) & 0xf;
1598 rd0 = (insn >> 0) & 0xf;
1599 rd1 = (insn >> 16) & 0xf;
1600 gen_op_iwmmxt_movq_M0_wRn(rd0);
1601 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1602 gen_op_iwmmxt_setpsr_nz();
1603 gen_op_iwmmxt_movq_wRn_M0(wrd);
1604 gen_op_iwmmxt_set_mup();
1605 gen_op_iwmmxt_set_cup();
1606 break;
1607 case 0x111: /* TMRC */
1608 if (insn & 0xf)
1609 return 1;
1610 rd = (insn >> 12) & 0xf;
1611 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1612 tmp = iwmmxt_load_creg(wrd);
1613 store_reg(s, rd, tmp);
18c9b560
AZ
1614 break;
1615 case 0x300: /* WANDN */
1616 wrd = (insn >> 12) & 0xf;
1617 rd0 = (insn >> 0) & 0xf;
1618 rd1 = (insn >> 16) & 0xf;
1619 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1620 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1621 gen_op_iwmmxt_andq_M0_wRn(rd1);
1622 gen_op_iwmmxt_setpsr_nz();
1623 gen_op_iwmmxt_movq_wRn_M0(wrd);
1624 gen_op_iwmmxt_set_mup();
1625 gen_op_iwmmxt_set_cup();
1626 break;
1627 case 0x200: /* WAND */
1628 wrd = (insn >> 12) & 0xf;
1629 rd0 = (insn >> 0) & 0xf;
1630 rd1 = (insn >> 16) & 0xf;
1631 gen_op_iwmmxt_movq_M0_wRn(rd0);
1632 gen_op_iwmmxt_andq_M0_wRn(rd1);
1633 gen_op_iwmmxt_setpsr_nz();
1634 gen_op_iwmmxt_movq_wRn_M0(wrd);
1635 gen_op_iwmmxt_set_mup();
1636 gen_op_iwmmxt_set_cup();
1637 break;
1638 case 0x810: case 0xa10: /* WMADD */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 0) & 0xf;
1641 rd1 = (insn >> 16) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
1643 if (insn & (1 << 21))
1644 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1645 else
1646 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 break;
1650 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1651 wrd = (insn >> 12) & 0xf;
1652 rd0 = (insn >> 16) & 0xf;
1653 rd1 = (insn >> 0) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1664 break;
1665 case 3:
1666 return 1;
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 16) & 0xf;
1675 rd1 = (insn >> 0) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
1677 switch ((insn >> 22) & 3) {
1678 case 0:
1679 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1680 break;
1681 case 1:
1682 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1683 break;
1684 case 2:
1685 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1686 break;
1687 case 3:
1688 return 1;
1689 }
1690 gen_op_iwmmxt_movq_wRn_M0(wrd);
1691 gen_op_iwmmxt_set_mup();
1692 gen_op_iwmmxt_set_cup();
1693 break;
1694 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1695 wrd = (insn >> 12) & 0xf;
1696 rd0 = (insn >> 16) & 0xf;
1697 rd1 = (insn >> 0) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0);
1699 if (insn & (1 << 22))
1700 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1701 else
1702 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1703 if (!(insn & (1 << 20)))
1704 gen_op_iwmmxt_addl_M0_wRn(wrd);
1705 gen_op_iwmmxt_movq_wRn_M0(wrd);
1706 gen_op_iwmmxt_set_mup();
1707 break;
1708 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1709 wrd = (insn >> 12) & 0xf;
1710 rd0 = (insn >> 16) & 0xf;
1711 rd1 = (insn >> 0) & 0xf;
1712 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1713 if (insn & (1 << 21)) {
1714 if (insn & (1 << 20))
1715 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1716 else
1717 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1718 } else {
1719 if (insn & (1 << 20))
1720 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1721 else
1722 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1723 }
18c9b560
AZ
1724 gen_op_iwmmxt_movq_wRn_M0(wrd);
1725 gen_op_iwmmxt_set_mup();
1726 break;
1727 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1728 wrd = (insn >> 12) & 0xf;
1729 rd0 = (insn >> 16) & 0xf;
1730 rd1 = (insn >> 0) & 0xf;
1731 gen_op_iwmmxt_movq_M0_wRn(rd0);
1732 if (insn & (1 << 21))
1733 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1734 else
1735 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1736 if (!(insn & (1 << 20))) {
e677137d
PB
1737 iwmmxt_load_reg(cpu_V1, wrd);
1738 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1739 }
1740 gen_op_iwmmxt_movq_wRn_M0(wrd);
1741 gen_op_iwmmxt_set_mup();
1742 break;
1743 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1744 wrd = (insn >> 12) & 0xf;
1745 rd0 = (insn >> 16) & 0xf;
1746 rd1 = (insn >> 0) & 0xf;
1747 gen_op_iwmmxt_movq_M0_wRn(rd0);
1748 switch ((insn >> 22) & 3) {
1749 case 0:
1750 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1751 break;
1752 case 1:
1753 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1754 break;
1755 case 2:
1756 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1757 break;
1758 case 3:
1759 return 1;
1760 }
1761 gen_op_iwmmxt_movq_wRn_M0(wrd);
1762 gen_op_iwmmxt_set_mup();
1763 gen_op_iwmmxt_set_cup();
1764 break;
1765 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1766 wrd = (insn >> 12) & 0xf;
1767 rd0 = (insn >> 16) & 0xf;
1768 rd1 = (insn >> 0) & 0xf;
1769 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1770 if (insn & (1 << 22)) {
1771 if (insn & (1 << 20))
1772 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1773 else
1774 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1775 } else {
1776 if (insn & (1 << 20))
1777 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1778 else
1779 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1780 }
18c9b560
AZ
1781 gen_op_iwmmxt_movq_wRn_M0(wrd);
1782 gen_op_iwmmxt_set_mup();
1783 gen_op_iwmmxt_set_cup();
1784 break;
1785 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1786 wrd = (insn >> 12) & 0xf;
1787 rd0 = (insn >> 16) & 0xf;
1788 rd1 = (insn >> 0) & 0xf;
1789 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1790 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1791 tcg_gen_andi_i32(tmp, tmp, 7);
1792 iwmmxt_load_reg(cpu_V1, rd1);
1793 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1794 tcg_temp_free_i32(tmp);
18c9b560
AZ
1795 gen_op_iwmmxt_movq_wRn_M0(wrd);
1796 gen_op_iwmmxt_set_mup();
1797 break;
1798 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1799 if (((insn >> 6) & 3) == 3)
1800 return 1;
18c9b560
AZ
1801 rd = (insn >> 12) & 0xf;
1802 wrd = (insn >> 16) & 0xf;
da6b5335 1803 tmp = load_reg(s, rd);
18c9b560
AZ
1804 gen_op_iwmmxt_movq_M0_wRn(wrd);
1805 switch ((insn >> 6) & 3) {
1806 case 0:
da6b5335
FN
1807 tmp2 = tcg_const_i32(0xff);
1808 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1809 break;
1810 case 1:
da6b5335
FN
1811 tmp2 = tcg_const_i32(0xffff);
1812 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1813 break;
1814 case 2:
da6b5335
FN
1815 tmp2 = tcg_const_i32(0xffffffff);
1816 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1817 break;
da6b5335
FN
1818 default:
1819 TCGV_UNUSED(tmp2);
1820 TCGV_UNUSED(tmp3);
18c9b560 1821 }
da6b5335
FN
1822 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1823 tcg_temp_free(tmp3);
1824 tcg_temp_free(tmp2);
7d1b0095 1825 tcg_temp_free_i32(tmp);
18c9b560
AZ
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1830 rd = (insn >> 12) & 0xf;
1831 wrd = (insn >> 16) & 0xf;
da6b5335 1832 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1833 return 1;
1834 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1835 tmp = tcg_temp_new_i32();
18c9b560
AZ
1836 switch ((insn >> 22) & 3) {
1837 case 0:
da6b5335
FN
1838 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1839 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1840 if (insn & 8) {
1841 tcg_gen_ext8s_i32(tmp, tmp);
1842 } else {
1843 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1844 }
1845 break;
1846 case 1:
da6b5335
FN
1847 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1848 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1849 if (insn & 8) {
1850 tcg_gen_ext16s_i32(tmp, tmp);
1851 } else {
1852 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1853 }
1854 break;
1855 case 2:
da6b5335
FN
1856 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1857 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1858 break;
18c9b560 1859 }
da6b5335 1860 store_reg(s, rd, tmp);
18c9b560
AZ
1861 break;
1862 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1863 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1864 return 1;
da6b5335 1865 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1866 switch ((insn >> 22) & 3) {
1867 case 0:
da6b5335 1868 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1869 break;
1870 case 1:
da6b5335 1871 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1872 break;
1873 case 2:
da6b5335 1874 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1875 break;
18c9b560 1876 }
da6b5335
FN
1877 tcg_gen_shli_i32(tmp, tmp, 28);
1878 gen_set_nzcv(tmp);
7d1b0095 1879 tcg_temp_free_i32(tmp);
18c9b560
AZ
1880 break;
1881 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1882 if (((insn >> 6) & 3) == 3)
1883 return 1;
18c9b560
AZ
1884 rd = (insn >> 12) & 0xf;
1885 wrd = (insn >> 16) & 0xf;
da6b5335 1886 tmp = load_reg(s, rd);
18c9b560
AZ
1887 switch ((insn >> 6) & 3) {
1888 case 0:
da6b5335 1889 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1890 break;
1891 case 1:
da6b5335 1892 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1893 break;
1894 case 2:
da6b5335 1895 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 1896 break;
18c9b560 1897 }
7d1b0095 1898 tcg_temp_free_i32(tmp);
18c9b560
AZ
1899 gen_op_iwmmxt_movq_wRn_M0(wrd);
1900 gen_op_iwmmxt_set_mup();
1901 break;
1902 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1903 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1904 return 1;
da6b5335 1905 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1906 tmp2 = tcg_temp_new_i32();
da6b5335 1907 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1908 switch ((insn >> 22) & 3) {
1909 case 0:
1910 for (i = 0; i < 7; i ++) {
da6b5335
FN
1911 tcg_gen_shli_i32(tmp2, tmp2, 4);
1912 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1913 }
1914 break;
1915 case 1:
1916 for (i = 0; i < 3; i ++) {
da6b5335
FN
1917 tcg_gen_shli_i32(tmp2, tmp2, 8);
1918 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1919 }
1920 break;
1921 case 2:
da6b5335
FN
1922 tcg_gen_shli_i32(tmp2, tmp2, 16);
1923 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1924 break;
18c9b560 1925 }
da6b5335 1926 gen_set_nzcv(tmp);
7d1b0095
PM
1927 tcg_temp_free_i32(tmp2);
1928 tcg_temp_free_i32(tmp);
18c9b560
AZ
1929 break;
1930 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1931 wrd = (insn >> 12) & 0xf;
1932 rd0 = (insn >> 16) & 0xf;
1933 gen_op_iwmmxt_movq_M0_wRn(rd0);
1934 switch ((insn >> 22) & 3) {
1935 case 0:
e677137d 1936 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1937 break;
1938 case 1:
e677137d 1939 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1940 break;
1941 case 2:
e677137d 1942 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1943 break;
1944 case 3:
1945 return 1;
1946 }
1947 gen_op_iwmmxt_movq_wRn_M0(wrd);
1948 gen_op_iwmmxt_set_mup();
1949 break;
1950 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1951 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1952 return 1;
da6b5335 1953 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1954 tmp2 = tcg_temp_new_i32();
da6b5335 1955 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1956 switch ((insn >> 22) & 3) {
1957 case 0:
1958 for (i = 0; i < 7; i ++) {
da6b5335
FN
1959 tcg_gen_shli_i32(tmp2, tmp2, 4);
1960 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1961 }
1962 break;
1963 case 1:
1964 for (i = 0; i < 3; i ++) {
da6b5335
FN
1965 tcg_gen_shli_i32(tmp2, tmp2, 8);
1966 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1967 }
1968 break;
1969 case 2:
da6b5335
FN
1970 tcg_gen_shli_i32(tmp2, tmp2, 16);
1971 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1972 break;
18c9b560 1973 }
da6b5335 1974 gen_set_nzcv(tmp);
7d1b0095
PM
1975 tcg_temp_free_i32(tmp2);
1976 tcg_temp_free_i32(tmp);
18c9b560
AZ
1977 break;
1978 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1979 rd = (insn >> 12) & 0xf;
1980 rd0 = (insn >> 16) & 0xf;
da6b5335 1981 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1982 return 1;
1983 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1984 tmp = tcg_temp_new_i32();
18c9b560
AZ
1985 switch ((insn >> 22) & 3) {
1986 case 0:
da6b5335 1987 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1988 break;
1989 case 1:
da6b5335 1990 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1991 break;
1992 case 2:
da6b5335 1993 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1994 break;
18c9b560 1995 }
da6b5335 1996 store_reg(s, rd, tmp);
18c9b560
AZ
1997 break;
1998 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1999 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2000 wrd = (insn >> 12) & 0xf;
2001 rd0 = (insn >> 16) & 0xf;
2002 rd1 = (insn >> 0) & 0xf;
2003 gen_op_iwmmxt_movq_M0_wRn(rd0);
2004 switch ((insn >> 22) & 3) {
2005 case 0:
2006 if (insn & (1 << 21))
2007 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2008 else
2009 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2010 break;
2011 case 1:
2012 if (insn & (1 << 21))
2013 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2014 else
2015 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2016 break;
2017 case 2:
2018 if (insn & (1 << 21))
2019 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2020 else
2021 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2022 break;
2023 case 3:
2024 return 1;
2025 }
2026 gen_op_iwmmxt_movq_wRn_M0(wrd);
2027 gen_op_iwmmxt_set_mup();
2028 gen_op_iwmmxt_set_cup();
2029 break;
2030 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2031 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 switch ((insn >> 22) & 3) {
2036 case 0:
2037 if (insn & (1 << 21))
2038 gen_op_iwmmxt_unpacklsb_M0();
2039 else
2040 gen_op_iwmmxt_unpacklub_M0();
2041 break;
2042 case 1:
2043 if (insn & (1 << 21))
2044 gen_op_iwmmxt_unpacklsw_M0();
2045 else
2046 gen_op_iwmmxt_unpackluw_M0();
2047 break;
2048 case 2:
2049 if (insn & (1 << 21))
2050 gen_op_iwmmxt_unpacklsl_M0();
2051 else
2052 gen_op_iwmmxt_unpacklul_M0();
2053 break;
2054 case 3:
2055 return 1;
2056 }
2057 gen_op_iwmmxt_movq_wRn_M0(wrd);
2058 gen_op_iwmmxt_set_mup();
2059 gen_op_iwmmxt_set_cup();
2060 break;
2061 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2062 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2063 wrd = (insn >> 12) & 0xf;
2064 rd0 = (insn >> 16) & 0xf;
2065 gen_op_iwmmxt_movq_M0_wRn(rd0);
2066 switch ((insn >> 22) & 3) {
2067 case 0:
2068 if (insn & (1 << 21))
2069 gen_op_iwmmxt_unpackhsb_M0();
2070 else
2071 gen_op_iwmmxt_unpackhub_M0();
2072 break;
2073 case 1:
2074 if (insn & (1 << 21))
2075 gen_op_iwmmxt_unpackhsw_M0();
2076 else
2077 gen_op_iwmmxt_unpackhuw_M0();
2078 break;
2079 case 2:
2080 if (insn & (1 << 21))
2081 gen_op_iwmmxt_unpackhsl_M0();
2082 else
2083 gen_op_iwmmxt_unpackhul_M0();
2084 break;
2085 case 3:
2086 return 1;
2087 }
2088 gen_op_iwmmxt_movq_wRn_M0(wrd);
2089 gen_op_iwmmxt_set_mup();
2090 gen_op_iwmmxt_set_cup();
2091 break;
2092 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2093 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2094 if (((insn >> 22) & 3) == 0)
2095 return 1;
18c9b560
AZ
2096 wrd = (insn >> 12) & 0xf;
2097 rd0 = (insn >> 16) & 0xf;
2098 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2099 tmp = tcg_temp_new_i32();
da6b5335 2100 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2101 tcg_temp_free_i32(tmp);
18c9b560 2102 return 1;
da6b5335 2103 }
18c9b560 2104 switch ((insn >> 22) & 3) {
18c9b560 2105 case 1:
477955bd 2106 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2107 break;
2108 case 2:
477955bd 2109 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2110 break;
2111 case 3:
477955bd 2112 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2113 break;
2114 }
7d1b0095 2115 tcg_temp_free_i32(tmp);
18c9b560
AZ
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
2120 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2121 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2122 if (((insn >> 22) & 3) == 0)
2123 return 1;
18c9b560
AZ
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2127 tmp = tcg_temp_new_i32();
da6b5335 2128 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2129 tcg_temp_free_i32(tmp);
18c9b560 2130 return 1;
da6b5335 2131 }
18c9b560 2132 switch ((insn >> 22) & 3) {
18c9b560 2133 case 1:
477955bd 2134 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2135 break;
2136 case 2:
477955bd 2137 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2138 break;
2139 case 3:
477955bd 2140 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2141 break;
2142 }
7d1b0095 2143 tcg_temp_free_i32(tmp);
18c9b560
AZ
2144 gen_op_iwmmxt_movq_wRn_M0(wrd);
2145 gen_op_iwmmxt_set_mup();
2146 gen_op_iwmmxt_set_cup();
2147 break;
2148 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2149 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2150 if (((insn >> 22) & 3) == 0)
2151 return 1;
18c9b560
AZ
2152 wrd = (insn >> 12) & 0xf;
2153 rd0 = (insn >> 16) & 0xf;
2154 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2155 tmp = tcg_temp_new_i32();
da6b5335 2156 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2157 tcg_temp_free_i32(tmp);
18c9b560 2158 return 1;
da6b5335 2159 }
18c9b560 2160 switch ((insn >> 22) & 3) {
18c9b560 2161 case 1:
477955bd 2162 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2163 break;
2164 case 2:
477955bd 2165 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2166 break;
2167 case 3:
477955bd 2168 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2169 break;
2170 }
7d1b0095 2171 tcg_temp_free_i32(tmp);
18c9b560
AZ
2172 gen_op_iwmmxt_movq_wRn_M0(wrd);
2173 gen_op_iwmmxt_set_mup();
2174 gen_op_iwmmxt_set_cup();
2175 break;
2176 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2177 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2178 if (((insn >> 22) & 3) == 0)
2179 return 1;
18c9b560
AZ
2180 wrd = (insn >> 12) & 0xf;
2181 rd0 = (insn >> 16) & 0xf;
2182 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2183 tmp = tcg_temp_new_i32();
18c9b560 2184 switch ((insn >> 22) & 3) {
18c9b560 2185 case 1:
da6b5335 2186 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2187 tcg_temp_free_i32(tmp);
18c9b560 2188 return 1;
da6b5335 2189 }
477955bd 2190 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2191 break;
2192 case 2:
da6b5335 2193 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2194 tcg_temp_free_i32(tmp);
18c9b560 2195 return 1;
da6b5335 2196 }
477955bd 2197 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2198 break;
2199 case 3:
da6b5335 2200 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2201 tcg_temp_free_i32(tmp);
18c9b560 2202 return 1;
da6b5335 2203 }
477955bd 2204 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2205 break;
2206 }
7d1b0095 2207 tcg_temp_free_i32(tmp);
18c9b560
AZ
2208 gen_op_iwmmxt_movq_wRn_M0(wrd);
2209 gen_op_iwmmxt_set_mup();
2210 gen_op_iwmmxt_set_cup();
2211 break;
2212 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2213 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2214 wrd = (insn >> 12) & 0xf;
2215 rd0 = (insn >> 16) & 0xf;
2216 rd1 = (insn >> 0) & 0xf;
2217 gen_op_iwmmxt_movq_M0_wRn(rd0);
2218 switch ((insn >> 22) & 3) {
2219 case 0:
2220 if (insn & (1 << 21))
2221 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2222 else
2223 gen_op_iwmmxt_minub_M0_wRn(rd1);
2224 break;
2225 case 1:
2226 if (insn & (1 << 21))
2227 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2228 else
2229 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2230 break;
2231 case 2:
2232 if (insn & (1 << 21))
2233 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2234 else
2235 gen_op_iwmmxt_minul_M0_wRn(rd1);
2236 break;
2237 case 3:
2238 return 1;
2239 }
2240 gen_op_iwmmxt_movq_wRn_M0(wrd);
2241 gen_op_iwmmxt_set_mup();
2242 break;
2243 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2244 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2245 wrd = (insn >> 12) & 0xf;
2246 rd0 = (insn >> 16) & 0xf;
2247 rd1 = (insn >> 0) & 0xf;
2248 gen_op_iwmmxt_movq_M0_wRn(rd0);
2249 switch ((insn >> 22) & 3) {
2250 case 0:
2251 if (insn & (1 << 21))
2252 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2253 else
2254 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2255 break;
2256 case 1:
2257 if (insn & (1 << 21))
2258 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2259 else
2260 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2261 break;
2262 case 2:
2263 if (insn & (1 << 21))
2264 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2265 else
2266 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2267 break;
2268 case 3:
2269 return 1;
2270 }
2271 gen_op_iwmmxt_movq_wRn_M0(wrd);
2272 gen_op_iwmmxt_set_mup();
2273 break;
2274 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2275 case 0x402: case 0x502: case 0x602: case 0x702:
2276 wrd = (insn >> 12) & 0xf;
2277 rd0 = (insn >> 16) & 0xf;
2278 rd1 = (insn >> 0) & 0xf;
2279 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2280 tmp = tcg_const_i32((insn >> 20) & 3);
2281 iwmmxt_load_reg(cpu_V1, rd1);
2282 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2283 tcg_temp_free(tmp);
18c9b560
AZ
2284 gen_op_iwmmxt_movq_wRn_M0(wrd);
2285 gen_op_iwmmxt_set_mup();
2286 break;
2287 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2288 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2289 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2290 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2291 wrd = (insn >> 12) & 0xf;
2292 rd0 = (insn >> 16) & 0xf;
2293 rd1 = (insn >> 0) & 0xf;
2294 gen_op_iwmmxt_movq_M0_wRn(rd0);
2295 switch ((insn >> 20) & 0xf) {
2296 case 0x0:
2297 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2298 break;
2299 case 0x1:
2300 gen_op_iwmmxt_subub_M0_wRn(rd1);
2301 break;
2302 case 0x3:
2303 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2304 break;
2305 case 0x4:
2306 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2307 break;
2308 case 0x5:
2309 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2310 break;
2311 case 0x7:
2312 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2313 break;
2314 case 0x8:
2315 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2316 break;
2317 case 0x9:
2318 gen_op_iwmmxt_subul_M0_wRn(rd1);
2319 break;
2320 case 0xb:
2321 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2322 break;
2323 default:
2324 return 1;
2325 }
2326 gen_op_iwmmxt_movq_wRn_M0(wrd);
2327 gen_op_iwmmxt_set_mup();
2328 gen_op_iwmmxt_set_cup();
2329 break;
2330 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2331 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2332 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2333 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2334 wrd = (insn >> 12) & 0xf;
2335 rd0 = (insn >> 16) & 0xf;
2336 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2337 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2338 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
da6b5335 2339 tcg_temp_free(tmp);
18c9b560
AZ
2340 gen_op_iwmmxt_movq_wRn_M0(wrd);
2341 gen_op_iwmmxt_set_mup();
2342 gen_op_iwmmxt_set_cup();
2343 break;
2344 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2345 case 0x418: case 0x518: case 0x618: case 0x718:
2346 case 0x818: case 0x918: case 0xa18: case 0xb18:
2347 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2348 wrd = (insn >> 12) & 0xf;
2349 rd0 = (insn >> 16) & 0xf;
2350 rd1 = (insn >> 0) & 0xf;
2351 gen_op_iwmmxt_movq_M0_wRn(rd0);
2352 switch ((insn >> 20) & 0xf) {
2353 case 0x0:
2354 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2355 break;
2356 case 0x1:
2357 gen_op_iwmmxt_addub_M0_wRn(rd1);
2358 break;
2359 case 0x3:
2360 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2361 break;
2362 case 0x4:
2363 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2364 break;
2365 case 0x5:
2366 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2367 break;
2368 case 0x7:
2369 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2370 break;
2371 case 0x8:
2372 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2373 break;
2374 case 0x9:
2375 gen_op_iwmmxt_addul_M0_wRn(rd1);
2376 break;
2377 case 0xb:
2378 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2379 break;
2380 default:
2381 return 1;
2382 }
2383 gen_op_iwmmxt_movq_wRn_M0(wrd);
2384 gen_op_iwmmxt_set_mup();
2385 gen_op_iwmmxt_set_cup();
2386 break;
2387 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2388 case 0x408: case 0x508: case 0x608: case 0x708:
2389 case 0x808: case 0x908: case 0xa08: case 0xb08:
2390 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2391 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2392 return 1;
18c9b560
AZ
2393 wrd = (insn >> 12) & 0xf;
2394 rd0 = (insn >> 16) & 0xf;
2395 rd1 = (insn >> 0) & 0xf;
2396 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2397 switch ((insn >> 22) & 3) {
18c9b560
AZ
2398 case 1:
2399 if (insn & (1 << 21))
2400 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2401 else
2402 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2403 break;
2404 case 2:
2405 if (insn & (1 << 21))
2406 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2407 else
2408 gen_op_iwmmxt_packul_M0_wRn(rd1);
2409 break;
2410 case 3:
2411 if (insn & (1 << 21))
2412 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2413 else
2414 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2415 break;
2416 }
2417 gen_op_iwmmxt_movq_wRn_M0(wrd);
2418 gen_op_iwmmxt_set_mup();
2419 gen_op_iwmmxt_set_cup();
2420 break;
2421 case 0x201: case 0x203: case 0x205: case 0x207:
2422 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2423 case 0x211: case 0x213: case 0x215: case 0x217:
2424 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2425 wrd = (insn >> 5) & 0xf;
2426 rd0 = (insn >> 12) & 0xf;
2427 rd1 = (insn >> 0) & 0xf;
2428 if (rd0 == 0xf || rd1 == 0xf)
2429 return 1;
2430 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2431 tmp = load_reg(s, rd0);
2432 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2433 switch ((insn >> 16) & 0xf) {
2434 case 0x0: /* TMIA */
da6b5335 2435 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2436 break;
2437 case 0x8: /* TMIAPH */
da6b5335 2438 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2439 break;
2440 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2441 if (insn & (1 << 16))
da6b5335 2442 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2443 if (insn & (1 << 17))
da6b5335
FN
2444 tcg_gen_shri_i32(tmp2, tmp2, 16);
2445 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2446 break;
2447 default:
7d1b0095
PM
2448 tcg_temp_free_i32(tmp2);
2449 tcg_temp_free_i32(tmp);
18c9b560
AZ
2450 return 1;
2451 }
7d1b0095
PM
2452 tcg_temp_free_i32(tmp2);
2453 tcg_temp_free_i32(tmp);
18c9b560
AZ
2454 gen_op_iwmmxt_movq_wRn_M0(wrd);
2455 gen_op_iwmmxt_set_mup();
2456 break;
2457 default:
2458 return 1;
2459 }
2460
2461 return 0;
2462}
2463
a1c7273b 2464/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2465 (ie. an undefined instruction). */
0ecb72a5 2466static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2467{
2468 int acc, rd0, rd1, rdhi, rdlo;
3a554c0f 2469 TCGv tmp, tmp2;
18c9b560
AZ
2470
2471 if ((insn & 0x0ff00f10) == 0x0e200010) {
2472 /* Multiply with Internal Accumulate Format */
2473 rd0 = (insn >> 12) & 0xf;
2474 rd1 = insn & 0xf;
2475 acc = (insn >> 5) & 7;
2476
2477 if (acc != 0)
2478 return 1;
2479
3a554c0f
FN
2480 tmp = load_reg(s, rd0);
2481 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2482 switch ((insn >> 16) & 0xf) {
2483 case 0x0: /* MIA */
3a554c0f 2484 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2485 break;
2486 case 0x8: /* MIAPH */
3a554c0f 2487 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2488 break;
2489 case 0xc: /* MIABB */
2490 case 0xd: /* MIABT */
2491 case 0xe: /* MIATB */
2492 case 0xf: /* MIATT */
18c9b560 2493 if (insn & (1 << 16))
3a554c0f 2494 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2495 if (insn & (1 << 17))
3a554c0f
FN
2496 tcg_gen_shri_i32(tmp2, tmp2, 16);
2497 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2498 break;
2499 default:
2500 return 1;
2501 }
7d1b0095
PM
2502 tcg_temp_free_i32(tmp2);
2503 tcg_temp_free_i32(tmp);
18c9b560
AZ
2504
2505 gen_op_iwmmxt_movq_wRn_M0(acc);
2506 return 0;
2507 }
2508
2509 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2510 /* Internal Accumulator Access Format */
2511 rdhi = (insn >> 16) & 0xf;
2512 rdlo = (insn >> 12) & 0xf;
2513 acc = insn & 7;
2514
2515 if (acc != 0)
2516 return 1;
2517
2518 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2519 iwmmxt_load_reg(cpu_V0, acc);
2520 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2521 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2522 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2523 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2524 } else { /* MAR */
3a554c0f
FN
2525 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2526 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2527 }
2528 return 0;
2529 }
2530
2531 return 1;
2532}
2533
9ee6e8bb
PB
/* Shift x right by n bits, treating a negative n as a left shift.  This
   lets VFP_SREG below use (bigbit - 1) even when bigbit is 0.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision VFP register number from an instruction:
   the 4-bit field at 'bigbit' supplies bits [4:1] and the single bit at
   'smallbit' supplies bit [0].  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision VFP register number.  With VFP3 there are 32
   D registers: the 4-bit field at 'bigbit' gives the low bits and the bit
   at 'smallbit' the high bit.  Pre-VFP3 cores have only 16 D registers,
   so a set 'smallbit' is an undefined instruction.  NB: expands to a
   'return 1' on that path, so this may only be used inside the disas
   functions (returning 1 signals an undefined instruction).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the destination (D), first source (N) and second
   source (M) register encodings.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2553
4373f3ce
PB
2554/* Move between integer and VFP cores. */
2555static TCGv gen_vfp_mrs(void)
2556{
7d1b0095 2557 TCGv tmp = tcg_temp_new_i32();
4373f3ce
PB
2558 tcg_gen_mov_i32(tmp, cpu_F0s);
2559 return tmp;
2560}
2561
2562static void gen_vfp_msr(TCGv tmp)
2563{
2564 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2565 tcg_temp_free_i32(tmp);
4373f3ce
PB
2566}
2567
ad69471c
PB
2568static void gen_neon_dup_u8(TCGv var, int shift)
2569{
7d1b0095 2570 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2571 if (shift)
2572 tcg_gen_shri_i32(var, var, shift);
86831435 2573 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2574 tcg_gen_shli_i32(tmp, var, 8);
2575 tcg_gen_or_i32(var, var, tmp);
2576 tcg_gen_shli_i32(tmp, var, 16);
2577 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2578 tcg_temp_free_i32(tmp);
ad69471c
PB
2579}
2580
2581static void gen_neon_dup_low16(TCGv var)
2582{
7d1b0095 2583 TCGv tmp = tcg_temp_new_i32();
86831435 2584 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2585 tcg_gen_shli_i32(tmp, var, 16);
2586 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2587 tcg_temp_free_i32(tmp);
ad69471c
PB
2588}
2589
2590static void gen_neon_dup_high16(TCGv var)
2591{
7d1b0095 2592 TCGv tmp = tcg_temp_new_i32();
ad69471c
PB
2593 tcg_gen_andi_i32(var, var, 0xffff0000);
2594 tcg_gen_shri_i32(tmp, var, 16);
2595 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2596 tcg_temp_free_i32(tmp);
ad69471c
PB
2597}
2598
8e18cde3
PM
2599static TCGv gen_load_and_replicate(DisasContext *s, TCGv addr, int size)
2600{
2601 /* Load a single Neon element and replicate into a 32 bit TCG reg */
2602 TCGv tmp;
2603 switch (size) {
2604 case 0:
2605 tmp = gen_ld8u(addr, IS_USER(s));
2606 gen_neon_dup_u8(tmp, 0);
2607 break;
2608 case 1:
2609 tmp = gen_ld16u(addr, IS_USER(s));
2610 gen_neon_dup_low16(tmp);
2611 break;
2612 case 2:
2613 tmp = gen_ld32(addr, IS_USER(s));
2614 break;
2615 default: /* Avoid compiler warnings. */
2616 abort();
2617 }
2618 return tmp;
2619}
2620
a1c7273b 2621/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2622 (ie. an undefined instruction). */
0ecb72a5 2623static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2624{
2625 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2626 int dp, veclen;
312eea9f 2627 TCGv addr;
4373f3ce 2628 TCGv tmp;
ad69471c 2629 TCGv tmp2;
b7bcbe95 2630
40f137e1
PB
2631 if (!arm_feature(env, ARM_FEATURE_VFP))
2632 return 1;
2633
5df8bac1 2634 if (!s->vfp_enabled) {
9ee6e8bb 2635 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2636 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2637 return 1;
2638 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2639 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2640 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2641 return 1;
2642 }
b7bcbe95
FB
2643 dp = ((insn & 0xf00) == 0xb00);
2644 switch ((insn >> 24) & 0xf) {
2645 case 0xe:
2646 if (insn & (1 << 4)) {
2647 /* single register transfer */
b7bcbe95
FB
2648 rd = (insn >> 12) & 0xf;
2649 if (dp) {
9ee6e8bb
PB
2650 int size;
2651 int pass;
2652
2653 VFP_DREG_N(rn, insn);
2654 if (insn & 0xf)
b7bcbe95 2655 return 1;
9ee6e8bb
PB
2656 if (insn & 0x00c00060
2657 && !arm_feature(env, ARM_FEATURE_NEON))
2658 return 1;
2659
2660 pass = (insn >> 21) & 1;
2661 if (insn & (1 << 22)) {
2662 size = 0;
2663 offset = ((insn >> 5) & 3) * 8;
2664 } else if (insn & (1 << 5)) {
2665 size = 1;
2666 offset = (insn & (1 << 6)) ? 16 : 0;
2667 } else {
2668 size = 2;
2669 offset = 0;
2670 }
18c9b560 2671 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2672 /* vfp->arm */
ad69471c 2673 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2674 switch (size) {
2675 case 0:
9ee6e8bb 2676 if (offset)
ad69471c 2677 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2678 if (insn & (1 << 23))
ad69471c 2679 gen_uxtb(tmp);
9ee6e8bb 2680 else
ad69471c 2681 gen_sxtb(tmp);
9ee6e8bb
PB
2682 break;
2683 case 1:
9ee6e8bb
PB
2684 if (insn & (1 << 23)) {
2685 if (offset) {
ad69471c 2686 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2687 } else {
ad69471c 2688 gen_uxth(tmp);
9ee6e8bb
PB
2689 }
2690 } else {
2691 if (offset) {
ad69471c 2692 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2693 } else {
ad69471c 2694 gen_sxth(tmp);
9ee6e8bb
PB
2695 }
2696 }
2697 break;
2698 case 2:
9ee6e8bb
PB
2699 break;
2700 }
ad69471c 2701 store_reg(s, rd, tmp);
b7bcbe95
FB
2702 } else {
2703 /* arm->vfp */
ad69471c 2704 tmp = load_reg(s, rd);
9ee6e8bb
PB
2705 if (insn & (1 << 23)) {
2706 /* VDUP */
2707 if (size == 0) {
ad69471c 2708 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2709 } else if (size == 1) {
ad69471c 2710 gen_neon_dup_low16(tmp);
9ee6e8bb 2711 }
cbbccffc 2712 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2713 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2714 tcg_gen_mov_i32(tmp2, tmp);
2715 neon_store_reg(rn, n, tmp2);
2716 }
2717 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2718 } else {
2719 /* VMOV */
2720 switch (size) {
2721 case 0:
ad69471c 2722 tmp2 = neon_load_reg(rn, pass);
d593c48e 2723 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2724 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2725 break;
2726 case 1:
ad69471c 2727 tmp2 = neon_load_reg(rn, pass);
d593c48e 2728 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2729 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2730 break;
2731 case 2:
9ee6e8bb
PB
2732 break;
2733 }
ad69471c 2734 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2735 }
b7bcbe95 2736 }
9ee6e8bb
PB
2737 } else { /* !dp */
2738 if ((insn & 0x6f) != 0x00)
2739 return 1;
2740 rn = VFP_SREG_N(insn);
18c9b560 2741 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2742 /* vfp->arm */
2743 if (insn & (1 << 21)) {
2744 /* system register */
40f137e1 2745 rn >>= 1;
9ee6e8bb 2746
b7bcbe95 2747 switch (rn) {
40f137e1 2748 case ARM_VFP_FPSID:
4373f3ce 2749 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2750 VFP3 restricts all id registers to privileged
2751 accesses. */
2752 if (IS_USER(s)
2753 && arm_feature(env, ARM_FEATURE_VFP3))
2754 return 1;
4373f3ce 2755 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2756 break;
40f137e1 2757 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2758 if (IS_USER(s))
2759 return 1;
4373f3ce 2760 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2761 break;
40f137e1
PB
2762 case ARM_VFP_FPINST:
2763 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2764 /* Not present in VFP3. */
2765 if (IS_USER(s)
2766 || arm_feature(env, ARM_FEATURE_VFP3))
2767 return 1;
4373f3ce 2768 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2769 break;
40f137e1 2770 case ARM_VFP_FPSCR:
601d70b9 2771 if (rd == 15) {
4373f3ce
PB
2772 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2773 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2774 } else {
7d1b0095 2775 tmp = tcg_temp_new_i32();
4373f3ce
PB
2776 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2777 }
b7bcbe95 2778 break;
9ee6e8bb
PB
2779 case ARM_VFP_MVFR0:
2780 case ARM_VFP_MVFR1:
2781 if (IS_USER(s)
06ed5d66 2782 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2783 return 1;
4373f3ce 2784 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2785 break;
b7bcbe95
FB
2786 default:
2787 return 1;
2788 }
2789 } else {
2790 gen_mov_F0_vreg(0, rn);
4373f3ce 2791 tmp = gen_vfp_mrs();
b7bcbe95
FB
2792 }
2793 if (rd == 15) {
b5ff1b31 2794 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2795 gen_set_nzcv(tmp);
7d1b0095 2796 tcg_temp_free_i32(tmp);
4373f3ce
PB
2797 } else {
2798 store_reg(s, rd, tmp);
2799 }
b7bcbe95
FB
2800 } else {
2801 /* arm->vfp */
b7bcbe95 2802 if (insn & (1 << 21)) {
40f137e1 2803 rn >>= 1;
b7bcbe95
FB
2804 /* system register */
2805 switch (rn) {
40f137e1 2806 case ARM_VFP_FPSID:
9ee6e8bb
PB
2807 case ARM_VFP_MVFR0:
2808 case ARM_VFP_MVFR1:
b7bcbe95
FB
2809 /* Writes are ignored. */
2810 break;
40f137e1 2811 case ARM_VFP_FPSCR:
e4c1cfa5 2812 tmp = load_reg(s, rd);
4373f3ce 2813 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2814 tcg_temp_free_i32(tmp);
b5ff1b31 2815 gen_lookup_tb(s);
b7bcbe95 2816 break;
40f137e1 2817 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2818 if (IS_USER(s))
2819 return 1;
71b3c3de
JR
2820 /* TODO: VFP subarchitecture support.
2821 * For now, keep the EN bit only */
e4c1cfa5 2822 tmp = load_reg(s, rd);
71b3c3de 2823 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2824 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2825 gen_lookup_tb(s);
2826 break;
2827 case ARM_VFP_FPINST:
2828 case ARM_VFP_FPINST2:
e4c1cfa5 2829 tmp = load_reg(s, rd);
4373f3ce 2830 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2831 break;
b7bcbe95
FB
2832 default:
2833 return 1;
2834 }
2835 } else {
e4c1cfa5 2836 tmp = load_reg(s, rd);
4373f3ce 2837 gen_vfp_msr(tmp);
b7bcbe95
FB
2838 gen_mov_vreg_F0(0, rn);
2839 }
2840 }
2841 }
2842 } else {
2843 /* data processing */
2844 /* The opcode is in bits 23, 21, 20 and 6. */
2845 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2846 if (dp) {
2847 if (op == 15) {
2848 /* rn is opcode */
2849 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2850 } else {
2851 /* rn is register number */
9ee6e8bb 2852 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2853 }
2854
04595bf6 2855 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2856 /* Integer or single precision destination. */
9ee6e8bb 2857 rd = VFP_SREG_D(insn);
b7bcbe95 2858 } else {
9ee6e8bb 2859 VFP_DREG_D(rd, insn);
b7bcbe95 2860 }
04595bf6
PM
2861 if (op == 15 &&
2862 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2863 /* VCVT from int is always from S reg regardless of dp bit.
2864 * VCVT with immediate frac_bits has same format as SREG_M
2865 */
2866 rm = VFP_SREG_M(insn);
b7bcbe95 2867 } else {
9ee6e8bb 2868 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2869 }
2870 } else {
9ee6e8bb 2871 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2872 if (op == 15 && rn == 15) {
2873 /* Double precision destination. */
9ee6e8bb
PB
2874 VFP_DREG_D(rd, insn);
2875 } else {
2876 rd = VFP_SREG_D(insn);
2877 }
04595bf6
PM
2878 /* NB that we implicitly rely on the encoding for the frac_bits
2879 * in VCVT of fixed to float being the same as that of an SREG_M
2880 */
9ee6e8bb 2881 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2882 }
2883
69d1fc22 2884 veclen = s->vec_len;
b7bcbe95
FB
2885 if (op == 15 && rn > 3)
2886 veclen = 0;
2887
2888 /* Shut up compiler warnings. */
2889 delta_m = 0;
2890 delta_d = 0;
2891 bank_mask = 0;
3b46e624 2892
b7bcbe95
FB
2893 if (veclen > 0) {
2894 if (dp)
2895 bank_mask = 0xc;
2896 else
2897 bank_mask = 0x18;
2898
2899 /* Figure out what type of vector operation this is. */
2900 if ((rd & bank_mask) == 0) {
2901 /* scalar */
2902 veclen = 0;
2903 } else {
2904 if (dp)
69d1fc22 2905 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2906 else
69d1fc22 2907 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2908
2909 if ((rm & bank_mask) == 0) {
2910 /* mixed scalar/vector */
2911 delta_m = 0;
2912 } else {
2913 /* vector */
2914 delta_m = delta_d;
2915 }
2916 }
2917 }
2918
2919 /* Load the initial operands. */
2920 if (op == 15) {
2921 switch (rn) {
2922 case 16:
2923 case 17:
2924 /* Integer source */
2925 gen_mov_F0_vreg(0, rm);
2926 break;
2927 case 8:
2928 case 9:
2929 /* Compare */
2930 gen_mov_F0_vreg(dp, rd);
2931 gen_mov_F1_vreg(dp, rm);
2932 break;
2933 case 10:
2934 case 11:
2935 /* Compare with zero */
2936 gen_mov_F0_vreg(dp, rd);
2937 gen_vfp_F1_ld0(dp);
2938 break;
9ee6e8bb
PB
2939 case 20:
2940 case 21:
2941 case 22:
2942 case 23:
644ad806
PB
2943 case 28:
2944 case 29:
2945 case 30:
2946 case 31:
9ee6e8bb
PB
2947 /* Source and destination the same. */
2948 gen_mov_F0_vreg(dp, rd);
2949 break;
6e0c0ed1
PM
2950 case 4:
2951 case 5:
2952 case 6:
2953 case 7:
2954 /* VCVTB, VCVTT: only present with the halfprec extension,
2955 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2956 */
2957 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2958 return 1;
2959 }
2960 /* Otherwise fall through */
b7bcbe95
FB
2961 default:
2962 /* One source operand. */
2963 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2964 break;
b7bcbe95
FB
2965 }
2966 } else {
2967 /* Two source operands. */
2968 gen_mov_F0_vreg(dp, rn);
2969 gen_mov_F1_vreg(dp, rm);
2970 }
2971
2972 for (;;) {
2973 /* Perform the calculation. */
2974 switch (op) {
605a6aed
PM
2975 case 0: /* VMLA: fd + (fn * fm) */
2976 /* Note that order of inputs to the add matters for NaNs */
2977 gen_vfp_F1_mul(dp);
2978 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2979 gen_vfp_add(dp);
2980 break;
605a6aed 2981 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2982 gen_vfp_mul(dp);
605a6aed
PM
2983 gen_vfp_F1_neg(dp);
2984 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2985 gen_vfp_add(dp);
2986 break;
605a6aed
PM
2987 case 2: /* VNMLS: -fd + (fn * fm) */
2988 /* Note that it isn't valid to replace (-A + B) with (B - A)
2989 * or similar plausible looking simplifications
2990 * because this will give wrong results for NaNs.
2991 */
2992 gen_vfp_F1_mul(dp);
2993 gen_mov_F0_vreg(dp, rd);
2994 gen_vfp_neg(dp);
2995 gen_vfp_add(dp);
b7bcbe95 2996 break;
605a6aed 2997 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2998 gen_vfp_mul(dp);
605a6aed
PM
2999 gen_vfp_F1_neg(dp);
3000 gen_mov_F0_vreg(dp, rd);
b7bcbe95 3001 gen_vfp_neg(dp);
605a6aed 3002 gen_vfp_add(dp);
b7bcbe95
FB
3003 break;
3004 case 4: /* mul: fn * fm */
3005 gen_vfp_mul(dp);
3006 break;
3007 case 5: /* nmul: -(fn * fm) */
3008 gen_vfp_mul(dp);
3009 gen_vfp_neg(dp);
3010 break;
3011 case 6: /* add: fn + fm */
3012 gen_vfp_add(dp);
3013 break;
3014 case 7: /* sub: fn - fm */
3015 gen_vfp_sub(dp);
3016 break;
3017 case 8: /* div: fn / fm */
3018 gen_vfp_div(dp);
3019 break;
da97f52c
PM
3020 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3021 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3022 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3023 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3024 /* These are fused multiply-add, and must be done as one
3025 * floating point operation with no rounding between the
3026 * multiplication and addition steps.
3027 * NB that doing the negations here as separate steps is
3028 * correct : an input NaN should come out with its sign bit
3029 * flipped if it is a negated-input.
3030 */
3031 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3032 return 1;
3033 }
3034 if (dp) {
3035 TCGv_ptr fpst;
3036 TCGv_i64 frd;
3037 if (op & 1) {
3038 /* VFNMS, VFMS */
3039 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3040 }
3041 frd = tcg_temp_new_i64();
3042 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3043 if (op & 2) {
3044 /* VFNMA, VFNMS */
3045 gen_helper_vfp_negd(frd, frd);
3046 }
3047 fpst = get_fpstatus_ptr(0);
3048 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3049 cpu_F1d, frd, fpst);
3050 tcg_temp_free_ptr(fpst);
3051 tcg_temp_free_i64(frd);
3052 } else {
3053 TCGv_ptr fpst;
3054 TCGv_i32 frd;
3055 if (op & 1) {
3056 /* VFNMS, VFMS */
3057 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3058 }
3059 frd = tcg_temp_new_i32();
3060 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3061 if (op & 2) {
3062 gen_helper_vfp_negs(frd, frd);
3063 }
3064 fpst = get_fpstatus_ptr(0);
3065 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3066 cpu_F1s, frd, fpst);
3067 tcg_temp_free_ptr(fpst);
3068 tcg_temp_free_i32(frd);
3069 }
3070 break;
9ee6e8bb
PB
3071 case 14: /* fconst */
3072 if (!arm_feature(env, ARM_FEATURE_VFP3))
3073 return 1;
3074
3075 n = (insn << 12) & 0x80000000;
3076 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3077 if (dp) {
3078 if (i & 0x40)
3079 i |= 0x3f80;
3080 else
3081 i |= 0x4000;
3082 n |= i << 16;
4373f3ce 3083 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3084 } else {
3085 if (i & 0x40)
3086 i |= 0x780;
3087 else
3088 i |= 0x800;
3089 n |= i << 19;
5b340b51 3090 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3091 }
9ee6e8bb 3092 break;
b7bcbe95
FB
3093 case 15: /* extension space */
3094 switch (rn) {
3095 case 0: /* cpy */
3096 /* no-op */
3097 break;
3098 case 1: /* abs */
3099 gen_vfp_abs(dp);
3100 break;
3101 case 2: /* neg */
3102 gen_vfp_neg(dp);
3103 break;
3104 case 3: /* sqrt */
3105 gen_vfp_sqrt(dp);
3106 break;
60011498 3107 case 4: /* vcvtb.f32.f16 */
60011498
PB
3108 tmp = gen_vfp_mrs();
3109 tcg_gen_ext16u_i32(tmp, tmp);
3110 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3111 tcg_temp_free_i32(tmp);
60011498
PB
3112 break;
3113 case 5: /* vcvtt.f32.f16 */
60011498
PB
3114 tmp = gen_vfp_mrs();
3115 tcg_gen_shri_i32(tmp, tmp, 16);
3116 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3117 tcg_temp_free_i32(tmp);
60011498
PB
3118 break;
3119 case 6: /* vcvtb.f16.f32 */
7d1b0095 3120 tmp = tcg_temp_new_i32();
60011498
PB
3121 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3122 gen_mov_F0_vreg(0, rd);
3123 tmp2 = gen_vfp_mrs();
3124 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3125 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3126 tcg_temp_free_i32(tmp2);
60011498
PB
3127 gen_vfp_msr(tmp);
3128 break;
3129 case 7: /* vcvtt.f16.f32 */
7d1b0095 3130 tmp = tcg_temp_new_i32();
60011498
PB
3131 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3132 tcg_gen_shli_i32(tmp, tmp, 16);
3133 gen_mov_F0_vreg(0, rd);
3134 tmp2 = gen_vfp_mrs();
3135 tcg_gen_ext16u_i32(tmp2, tmp2);
3136 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3137 tcg_temp_free_i32(tmp2);
60011498
PB
3138 gen_vfp_msr(tmp);
3139 break;
b7bcbe95
FB
3140 case 8: /* cmp */
3141 gen_vfp_cmp(dp);
3142 break;
3143 case 9: /* cmpe */
3144 gen_vfp_cmpe(dp);
3145 break;
3146 case 10: /* cmpz */
3147 gen_vfp_cmp(dp);
3148 break;
3149 case 11: /* cmpez */
3150 gen_vfp_F1_ld0(dp);
3151 gen_vfp_cmpe(dp);
3152 break;
3153 case 15: /* single<->double conversion */
3154 if (dp)
4373f3ce 3155 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3156 else
4373f3ce 3157 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3158 break;
3159 case 16: /* fuito */
5500b06c 3160 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3161 break;
3162 case 17: /* fsito */
5500b06c 3163 gen_vfp_sito(dp, 0);
b7bcbe95 3164 break;
9ee6e8bb
PB
3165 case 20: /* fshto */
3166 if (!arm_feature(env, ARM_FEATURE_VFP3))
3167 return 1;
5500b06c 3168 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3169 break;
3170 case 21: /* fslto */
3171 if (!arm_feature(env, ARM_FEATURE_VFP3))
3172 return 1;
5500b06c 3173 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3174 break;
3175 case 22: /* fuhto */
3176 if (!arm_feature(env, ARM_FEATURE_VFP3))
3177 return 1;
5500b06c 3178 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3179 break;
3180 case 23: /* fulto */
3181 if (!arm_feature(env, ARM_FEATURE_VFP3))
3182 return 1;
5500b06c 3183 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3184 break;
b7bcbe95 3185 case 24: /* ftoui */
5500b06c 3186 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3187 break;
3188 case 25: /* ftouiz */
5500b06c 3189 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3190 break;
3191 case 26: /* ftosi */
5500b06c 3192 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3193 break;
3194 case 27: /* ftosiz */
5500b06c 3195 gen_vfp_tosiz(dp, 0);
b7bcbe95 3196 break;
9ee6e8bb
PB
3197 case 28: /* ftosh */
3198 if (!arm_feature(env, ARM_FEATURE_VFP3))
3199 return 1;
5500b06c 3200 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3201 break;
3202 case 29: /* ftosl */
3203 if (!arm_feature(env, ARM_FEATURE_VFP3))
3204 return 1;
5500b06c 3205 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3206 break;
3207 case 30: /* ftouh */
3208 if (!arm_feature(env, ARM_FEATURE_VFP3))
3209 return 1;
5500b06c 3210 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3211 break;
3212 case 31: /* ftoul */
3213 if (!arm_feature(env, ARM_FEATURE_VFP3))
3214 return 1;
5500b06c 3215 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3216 break;
b7bcbe95 3217 default: /* undefined */
b7bcbe95
FB
3218 return 1;
3219 }
3220 break;
3221 default: /* undefined */
b7bcbe95
FB
3222 return 1;
3223 }
3224
3225 /* Write back the result. */
3226 if (op == 15 && (rn >= 8 && rn <= 11))
3227 ; /* Comparison, do nothing. */
04595bf6
PM
3228 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3229 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3230 gen_mov_vreg_F0(0, rd);
3231 else if (op == 15 && rn == 15)
3232 /* conversion */
3233 gen_mov_vreg_F0(!dp, rd);
3234 else
3235 gen_mov_vreg_F0(dp, rd);
3236
3237 /* break out of the loop if we have finished */
3238 if (veclen == 0)
3239 break;
3240
3241 if (op == 15 && delta_m == 0) {
3242 /* single source one-many */
3243 while (veclen--) {
3244 rd = ((rd + delta_d) & (bank_mask - 1))
3245 | (rd & bank_mask);
3246 gen_mov_vreg_F0(dp, rd);
3247 }
3248 break;
3249 }
3250 /* Setup the next operands. */
3251 veclen--;
3252 rd = ((rd + delta_d) & (bank_mask - 1))
3253 | (rd & bank_mask);
3254
3255 if (op == 15) {
3256 /* One source operand. */
3257 rm = ((rm + delta_m) & (bank_mask - 1))
3258 | (rm & bank_mask);
3259 gen_mov_F0_vreg(dp, rm);
3260 } else {
3261 /* Two source operands. */
3262 rn = ((rn + delta_d) & (bank_mask - 1))
3263 | (rn & bank_mask);
3264 gen_mov_F0_vreg(dp, rn);
3265 if (delta_m) {
3266 rm = ((rm + delta_m) & (bank_mask - 1))
3267 | (rm & bank_mask);
3268 gen_mov_F1_vreg(dp, rm);
3269 }
3270 }
3271 }
3272 }
3273 break;
3274 case 0xc:
3275 case 0xd:
8387da81 3276 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3277 /* two-register transfer */
3278 rn = (insn >> 16) & 0xf;
3279 rd = (insn >> 12) & 0xf;
3280 if (dp) {
9ee6e8bb
PB
3281 VFP_DREG_M(rm, insn);
3282 } else {
3283 rm = VFP_SREG_M(insn);
3284 }
b7bcbe95 3285
18c9b560 3286 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3287 /* vfp->arm */
3288 if (dp) {
4373f3ce
PB
3289 gen_mov_F0_vreg(0, rm * 2);
3290 tmp = gen_vfp_mrs();
3291 store_reg(s, rd, tmp);
3292 gen_mov_F0_vreg(0, rm * 2 + 1);
3293 tmp = gen_vfp_mrs();
3294 store_reg(s, rn, tmp);
b7bcbe95
FB
3295 } else {
3296 gen_mov_F0_vreg(0, rm);
4373f3ce 3297 tmp = gen_vfp_mrs();
8387da81 3298 store_reg(s, rd, tmp);
b7bcbe95 3299 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3300 tmp = gen_vfp_mrs();
8387da81 3301 store_reg(s, rn, tmp);
b7bcbe95
FB
3302 }
3303 } else {
3304 /* arm->vfp */
3305 if (dp) {
4373f3ce
PB
3306 tmp = load_reg(s, rd);
3307 gen_vfp_msr(tmp);
3308 gen_mov_vreg_F0(0, rm * 2);
3309 tmp = load_reg(s, rn);
3310 gen_vfp_msr(tmp);
3311 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3312 } else {
8387da81 3313 tmp = load_reg(s, rd);
4373f3ce 3314 gen_vfp_msr(tmp);
b7bcbe95 3315 gen_mov_vreg_F0(0, rm);
8387da81 3316 tmp = load_reg(s, rn);
4373f3ce 3317 gen_vfp_msr(tmp);
b7bcbe95
FB
3318 gen_mov_vreg_F0(0, rm + 1);
3319 }
3320 }
3321 } else {
3322 /* Load/store */
3323 rn = (insn >> 16) & 0xf;
3324 if (dp)
9ee6e8bb 3325 VFP_DREG_D(rd, insn);
b7bcbe95 3326 else
9ee6e8bb 3327 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3328 if ((insn & 0x01200000) == 0x01000000) {
3329 /* Single load/store */
3330 offset = (insn & 0xff) << 2;
3331 if ((insn & (1 << 23)) == 0)
3332 offset = -offset;
934814f1
PM
3333 if (s->thumb && rn == 15) {
3334 /* This is actually UNPREDICTABLE */
3335 addr = tcg_temp_new_i32();
3336 tcg_gen_movi_i32(addr, s->pc & ~2);
3337 } else {
3338 addr = load_reg(s, rn);
3339 }
312eea9f 3340 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3341 if (insn & (1 << 20)) {
312eea9f 3342 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3343 gen_mov_vreg_F0(dp, rd);
3344 } else {
3345 gen_mov_F0_vreg(dp, rd);
312eea9f 3346 gen_vfp_st(s, dp, addr);
b7bcbe95 3347 }
7d1b0095 3348 tcg_temp_free_i32(addr);
b7bcbe95
FB
3349 } else {
3350 /* load/store multiple */
934814f1 3351 int w = insn & (1 << 21);
b7bcbe95
FB
3352 if (dp)
3353 n = (insn >> 1) & 0x7f;
3354 else
3355 n = insn & 0xff;
3356
934814f1
PM
3357 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3358 /* P == U , W == 1 => UNDEF */
3359 return 1;
3360 }
3361 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3362 /* UNPREDICTABLE cases for bad immediates: we choose to
3363 * UNDEF to avoid generating huge numbers of TCG ops
3364 */
3365 return 1;
3366 }
3367 if (rn == 15 && w) {
3368 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3369 return 1;
3370 }
3371
3372 if (s->thumb && rn == 15) {
3373 /* This is actually UNPREDICTABLE */
3374 addr = tcg_temp_new_i32();
3375 tcg_gen_movi_i32(addr, s->pc & ~2);
3376 } else {
3377 addr = load_reg(s, rn);
3378 }
b7bcbe95 3379 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3380 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3381
3382 if (dp)
3383 offset = 8;
3384 else
3385 offset = 4;
3386 for (i = 0; i < n; i++) {
18c9b560 3387 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3388 /* load */
312eea9f 3389 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3390 gen_mov_vreg_F0(dp, rd + i);
3391 } else {
3392 /* store */
3393 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3394 gen_vfp_st(s, dp, addr);
b7bcbe95 3395 }
312eea9f 3396 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3397 }
934814f1 3398 if (w) {
b7bcbe95
FB
3399 /* writeback */
3400 if (insn & (1 << 24))
3401 offset = -offset * n;
3402 else if (dp && (insn & 1))
3403 offset = 4;
3404 else
3405 offset = 0;
3406
3407 if (offset != 0)
312eea9f
FN
3408 tcg_gen_addi_i32(addr, addr, offset);
3409 store_reg(s, rn, addr);
3410 } else {
7d1b0095 3411 tcg_temp_free_i32(addr);
b7bcbe95
FB
3412 }
3413 }
3414 }
3415 break;
3416 default:
3417 /* Should never happen. */
3418 return 1;
3419 }
3420 return 0;
3421}
3422
6e256c93 3423static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3424{
6e256c93
FB
3425 TranslationBlock *tb;
3426
3427 tb = s->tb;
3428 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3429 tcg_gen_goto_tb(n);
8984bd2e 3430 gen_set_pc_im(dest);
4b4a72e5 3431 tcg_gen_exit_tb((tcg_target_long)tb + n);
6e256c93 3432 } else {
8984bd2e 3433 gen_set_pc_im(dest);
57fec1fe 3434 tcg_gen_exit_tb(0);
6e256c93 3435 }
c53be334
FB
3436}
3437
8aaca4c0
FB
3438static inline void gen_jmp (DisasContext *s, uint32_t dest)
3439{
551bd27f 3440 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3441 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3442 if (s->thumb)
d9ba4830
PB
3443 dest |= 1;
3444 gen_bx_im(s, dest);
8aaca4c0 3445 } else {
6e256c93 3446 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3447 s->is_jmp = DISAS_TB_JUMP;
3448 }
3449}
3450
d9ba4830 3451static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3452{
ee097184 3453 if (x)
d9ba4830 3454 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3455 else
d9ba4830 3456 gen_sxth(t0);
ee097184 3457 if (y)
d9ba4830 3458 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3459 else
d9ba4830
PB
3460 gen_sxth(t1);
3461 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3462}
3463
3464/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3465static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3466 uint32_t mask;
3467
3468 mask = 0;
3469 if (flags & (1 << 0))
3470 mask |= 0xff;
3471 if (flags & (1 << 1))
3472 mask |= 0xff00;
3473 if (flags & (1 << 2))
3474 mask |= 0xff0000;
3475 if (flags & (1 << 3))
3476 mask |= 0xff000000;
9ee6e8bb 3477
2ae23e75 3478 /* Mask out undefined bits. */
9ee6e8bb 3479 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3480 if (!arm_feature(env, ARM_FEATURE_V4T))
3481 mask &= ~CPSR_T;
3482 if (!arm_feature(env, ARM_FEATURE_V5))
3483 mask &= ~CPSR_Q; /* V5TE in reality*/
9ee6e8bb 3484 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3485 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3486 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3487 mask &= ~CPSR_IT;
9ee6e8bb 3488 /* Mask out execution state bits. */
2ae23e75 3489 if (!spsr)
e160c51c 3490 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3491 /* Mask out privileged bits. */
3492 if (IS_USER(s))
9ee6e8bb 3493 mask &= CPSR_USER;
b5ff1b31
FB
3494 return mask;
3495}
3496
2fbac54b
FN
3497/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3498static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
b5ff1b31 3499{
d9ba4830 3500 TCGv tmp;
b5ff1b31
FB
3501 if (spsr) {
3502 /* ??? This is also undefined in system mode. */
3503 if (IS_USER(s))
3504 return 1;
d9ba4830
PB
3505
3506 tmp = load_cpu_field(spsr);
3507 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3508 tcg_gen_andi_i32(t0, t0, mask);
3509 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3510 store_cpu_field(tmp, spsr);
b5ff1b31 3511 } else {
2fbac54b 3512 gen_set_cpsr(t0, mask);
b5ff1b31 3513 }
7d1b0095 3514 tcg_temp_free_i32(t0);
b5ff1b31
FB
3515 gen_lookup_tb(s);
3516 return 0;
3517}
3518
2fbac54b
FN
3519/* Returns nonzero if access to the PSR is not permitted. */
3520static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3521{
3522 TCGv tmp;
7d1b0095 3523 tmp = tcg_temp_new_i32();
2fbac54b
FN
3524 tcg_gen_movi_i32(tmp, val);
3525 return gen_set_psr(s, mask, spsr, tmp);
3526}
3527
e9bb4aa9
JR
3528/* Generate an old-style exception return. Marks pc as dead. */
3529static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3530{
d9ba4830 3531 TCGv tmp;
e9bb4aa9 3532 store_reg(s, 15, pc);
d9ba4830
PB
3533 tmp = load_cpu_field(spsr);
3534 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3535 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3536 s->is_jmp = DISAS_UPDATE;
3537}
3538
/* Generate a v6 exception return.  Marks both values as dead.
 * CPSR is written from the loaded cpsr value, then PC is set;
 * the TB ends so the new state takes effect.
 */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3547
9ee6e8bb
PB
3548static inline void
3549gen_set_condexec (DisasContext *s)
3550{
3551 if (s->condexec_mask) {
8f01245e 3552 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
7d1b0095 3553 TCGv tmp = tcg_temp_new_i32();
8f01245e 3554 tcg_gen_movi_i32(tmp, val);
d9ba4830 3555 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3556 }
3557}
3b46e624 3558
bc4a0de0
PM
/* Raise exception EXCP for the instruction at (s->pc - offset):
 * sync condexec state and PC first so the exception sees a
 * consistent CPU state, then end the TB.
 */
static void gen_exception_insn(DisasContext *s, int offset, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - offset);
    gen_exception(excp);
    s->is_jmp = DISAS_JUMP;
}
3566
9ee6e8bb
PB
3567static void gen_nop_hint(DisasContext *s, int val)
3568{
3569 switch (val) {
3570 case 3: /* wfi */
8984bd2e 3571 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3572 s->is_jmp = DISAS_WFI;
3573 break;
3574 case 2: /* wfe */
3575 case 4: /* sev */
3576 /* TODO: Implement SEV and WFE. May help SMP performance. */
3577 default: /* nop */
3578 break;
3579 }
3580}
99c475ab 3581
ad69471c 3582#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3583
62698be3 3584static inline void gen_neon_add(int size, TCGv t0, TCGv t1)
9ee6e8bb
PB
3585{
3586 switch (size) {
dd8fbd78
FN
3587 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3588 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3589 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3590 default: abort();
9ee6e8bb 3591 }
9ee6e8bb
PB
3592}
3593
dd8fbd78 3594static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
ad69471c
PB
3595{
3596 switch (size) {
dd8fbd78
FN
3597 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3598 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3599 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3600 default: return;
3601 }
3602}
3603
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* Dispatch a Neon helper that takes cpu_env, keyed on (size, u):
 * tmp = helper(cpu_env, tmp, tmp2).  Expands inside a function with
 * size/u/tmp/tmp2 in scope; returns 1 from the enclosing function for
 * invalid size/unsigned combinations.
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV but for helpers that do not take cpu_env:
 * tmp = helper(tmp, tmp2).
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
3655
dd8fbd78 3656static TCGv neon_load_scratch(int scratch)
9ee6e8bb 3657{
7d1b0095 3658 TCGv tmp = tcg_temp_new_i32();
dd8fbd78
FN
3659 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3660 return tmp;
9ee6e8bb
PB
3661}
3662
dd8fbd78 3663static void neon_store_scratch(int scratch, TCGv var)
9ee6e8bb 3664{
dd8fbd78 3665 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3666 tcg_temp_free_i32(var);
9ee6e8bb
PB
3667}
3668
dd8fbd78 3669static inline TCGv neon_get_scalar(int size, int reg)
9ee6e8bb 3670{
dd8fbd78 3671 TCGv tmp;
9ee6e8bb 3672 if (size == 1) {
0fad6efc
PM
3673 tmp = neon_load_reg(reg & 7, reg >> 4);
3674 if (reg & 8) {
dd8fbd78 3675 gen_neon_dup_high16(tmp);
0fad6efc
PM
3676 } else {
3677 gen_neon_dup_low16(tmp);
dd8fbd78 3678 }
0fad6efc
PM
3679 } else {
3680 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3681 }
dd8fbd78 3682 return tmp;
9ee6e8bb
PB
3683}
3684
02acedf9 3685static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3686{
02acedf9 3687 TCGv tmp, tmp2;
600b828c 3688 if (!q && size == 2) {
02acedf9
PM
3689 return 1;
3690 }
3691 tmp = tcg_const_i32(rd);
3692 tmp2 = tcg_const_i32(rm);
3693 if (q) {
3694 switch (size) {
3695 case 0:
02da0b2d 3696 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3697 break;
3698 case 1:
02da0b2d 3699 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3700 break;
3701 case 2:
02da0b2d 3702 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3703 break;
3704 default:
3705 abort();
3706 }
3707 } else {
3708 switch (size) {
3709 case 0:
02da0b2d 3710 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3711 break;
3712 case 1:
02da0b2d 3713 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3714 break;
3715 default:
3716 abort();
3717 }
3718 }
3719 tcg_temp_free_i32(tmp);
3720 tcg_temp_free_i32(tmp2);
3721 return 0;
19457615
FN
3722}
3723
d68a6f3a 3724static int gen_neon_zip(int rd, int rm, int size, int q)
19457615
FN
3725{
3726 TCGv tmp, tmp2;
600b828c 3727 if (!q && size == 2) {
d68a6f3a
PM
3728 return 1;
3729 }
3730 tmp = tcg_const_i32(rd);
3731 tmp2 = tcg_const_i32(rm);
3732 if (q) {
3733 switch (size) {
3734 case 0:
02da0b2d 3735 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3736 break;
3737 case 1:
02da0b2d 3738 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3739 break;
3740 case 2:
02da0b2d 3741 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3742 break;
3743 default:
3744 abort();
3745 }
3746 } else {
3747 switch (size) {
3748 case 0:
02da0b2d 3749 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3750 break;
3751 case 1:
02da0b2d 3752 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3753 break;
3754 default:
3755 abort();
3756 }
3757 }
3758 tcg_temp_free_i32(tmp);
3759 tcg_temp_free_i32(tmp2);
3760 return 0;
19457615
FN
3761}
3762
19457615
FN
3763static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3764{
3765 TCGv rd, tmp;
3766
7d1b0095
PM
3767 rd = tcg_temp_new_i32();
3768 tmp = tcg_temp_new_i32();
19457615
FN
3769
3770 tcg_gen_shli_i32(rd, t0, 8);
3771 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3772 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3773 tcg_gen_or_i32(rd, rd, tmp);
3774
3775 tcg_gen_shri_i32(t1, t1, 8);
3776 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3777 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3778 tcg_gen_or_i32(t1, t1, tmp);
3779 tcg_gen_mov_i32(t0, rd);
3780
7d1b0095
PM
3781 tcg_temp_free_i32(tmp);
3782 tcg_temp_free_i32(rd);
19457615
FN
3783}
3784
3785static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3786{
3787 TCGv rd, tmp;
3788
7d1b0095
PM
3789 rd = tcg_temp_new_i32();
3790 tmp = tcg_temp_new_i32();
19457615
FN
3791
3792 tcg_gen_shli_i32(rd, t0, 16);
3793 tcg_gen_andi_i32(tmp, t1, 0xffff);
3794 tcg_gen_or_i32(rd, rd, tmp);
3795 tcg_gen_shri_i32(t1, t1, 16);
3796 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3797 tcg_gen_or_i32(t1, t1, tmp);
3798 tcg_gen_mov_i32(t0, rd);
3799
7d1b0095
PM
3800 tcg_temp_free_i32(tmp);
3801 tcg_temp_free_i32(rd);
19457615
FN
3802}
3803
3804
9ee6e8bb
PB
/* Layout table for the VLDn/VSTn "load/store all elements" forms,
 * indexed by insn bits [11:8]:
 *   nregs      - number of D registers transferred
 *   interleave - element interleave factor between the registers
 *   spacing    - register spacing (1: Dn,Dn+1,...  2: Dn,Dn+2,...)
 */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3822
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  Covers the "all elements" (VLDn/VSTn),
   "single element to all lanes" (VLDn dup) and "single element"
   forms, including post-index writeback of the base register.  */
static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;
    TCGv_i64 tmp64;

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Interleaved registers re-read the base with a per-register
             * element offset instead of continuing linearly.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: whole D register at a time.  */
                if (load) {
                    tmp64 = gen_ld64(addr, IS_USER(s));
                    neon_store_reg64(tmp64, rd);
                    tcg_temp_free_i64(tmp64);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    neon_load_reg64(tmp64, rd);
                    gen_st64(tmp64, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Each pass covers one 32-bit half of the D register.  */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = gen_ld32(addr, IS_USER(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_st32(tmp, addr, IS_USER(s));
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses assembled into one word.  */
                        if (load) {
                            tmp = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = gen_ld16u(addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_st16(tmp, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_st16(tmp2, addr, IS_USER(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four 8-bit accesses assembled into one word.  */
                        if (load) {
                            TCGV_UNUSED(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = gen_ld8u(addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_st8(tmp, addr, IS_USER(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(addr, IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(addr, IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(addr, IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded element into the existing
                         * register contents at the selected lane.  */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, addr, IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, addr, IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, addr, IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Post-index writeback: rm == 13 means immediate (transfer size),
         * otherwise add register rm to the base.  */
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4131
8f8e3aa4
PB
4132/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4133static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4134{
4135 tcg_gen_and_i32(t, t, c);
f669df27 4136 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4137 tcg_gen_or_i32(dest, t, f);
4138}
4139
a7812ae4 4140static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4141{
4142 switch (size) {
4143 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4144 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4145 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4146 default: abort();
4147 }
4148}
4149
a7812ae4 4150static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4151{
4152 switch (size) {
02da0b2d
PM
4153 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4154 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4155 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4156 default: abort();
4157 }
4158}
4159
a7812ae4 4160static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
4161{
4162 switch (size) {
02da0b2d
PM
4163 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4164 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4165 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4166 default: abort();
4167 }
4168}
4169
/* Signed-to-unsigned saturating narrow (as for VQMOVUN): pack the
 * elements of the 64-bit src into the 32-bit dest, saturating to the
 * unsigned destination range.  size is the destination element size
 * (0 = 8 bit, 1 = 16 bit, 2 = 32 bit).  Helpers take cpu_env,
 * presumably for saturation (QC) state -- confirm.
 */
static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
4179
ad69471c
PB
4180static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4181 int q, int u)
4182{
4183 if (q) {
4184 if (u) {
4185 switch (size) {
4186 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4187 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4188 default: abort();
4189 }
4190 } else {
4191 switch (size) {
4192 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4193 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4194 default: abort();
4195 }
4196 }
4197 } else {
4198 if (u) {
4199 switch (size) {
b408a9b0
CL
4200 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4201 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4202 default: abort();
4203 }
4204 } else {
4205 switch (size) {
4206 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4207 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4208 default: abort();
4209 }
4210 }
4211 }
4212}
4213
a7812ae4 4214static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
4215{
4216 if (u) {
4217 switch (size) {
4218 case 0: gen_helper_neon_widen_u8(dest, src); break;
4219 case 1: gen_helper_neon_widen_u16(dest, src); break;
4220 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4221 default: abort();
4222 }
4223 } else {
4224 switch (size) {
4225 case 0: gen_helper_neon_widen_s8(dest, src); break;
4226 case 1: gen_helper_neon_widen_s16(dest, src); break;
4227 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4228 default: abort();
4229 }
4230 }
7d1b0095 4231 tcg_temp_free_i32(src);
ad69471c
PB
4232}
4233
/* Long (double-width-element) vector add on the cpu_V0/cpu_V1 globals.
 * size is the pre-widening element size (0 = 16-bit lanes, 1 = 32-bit
 * lanes, 2 = a single 64-bit value, per the helper names).
 * NOTE(review): CPU_V001 is a macro defined earlier in this file,
 * presumably expanding to the (cpu_V0, cpu_V0, cpu_V1) operand triple
 * -- confirm against its definition.
 */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
4243
/* Long (double-width-element) vector subtract on the cpu_V0/cpu_V1
 * globals; mirror of gen_neon_addl.  size is the pre-widening element
 * size (0 = 16-bit lanes, 1 = 32-bit lanes, 2 = single 64-bit value).
 * NOTE(review): CPU_V001 is defined earlier in the file -- presumably
 * the (cpu_V0, cpu_V0, cpu_V1) operand triple; confirm.
 */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
4253
/* Negate a 64-bit value of widened elements in place.  size is the
 * pre-widening element size: 0 = 16-bit lanes, 1 = 32-bit lanes (per
 * the helper names), 2 = negate the whole 64-bit value.
 */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        /* Single 64-bit element: plain TCG negate, no helper needed. */
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
4265
/* Saturating long vector add: op0 = saturate(op0 + op1).  size is the
 * pre-widening element size; only size 1 (32-bit widened lanes) and
 * size 2 (64-bit) are valid -- there is no 16-bit saturating-addl
 * helper, so size 0 aborts.  Helpers take cpu_env, presumably for
 * saturation (QC) state -- confirm.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
4274
/* Widening multiply: dest = a * b with double-width result elements.
 * size is the *source* element size (0 = 8x8->16, 1 = 16x16->32,
 * 2 = 32x32->64); u selects unsigned over signed multiplies.
 * The 32-bit source temporaries a and b are consumed: freed explicitly
 * here for size < 2, and for size == 2 presumably by
 * gen_mul[su]_i64_i32 -- NOTE(review): confirm against those helpers.
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64: no Neon helper, use the generic 64 = 32x32 multiply. */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now. */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4304
c33171c7
PM
4305static void gen_neon_narrow_op(int op, int u, int size, TCGv dest, TCGv_i64 src)
4306{
4307 if (op) {
4308 if (u) {
4309 gen_neon_unarrow_sats(size, dest, src);
4310 } else {
4311 gen_neon_narrow(size, dest, src);
4312 }
4313 } else {
4314 if (u) {
4315 gen_neon_narrow_satu(size, dest, src);
4316 } else {
4317 gen_neon_narrow_sats(size, dest, src);
4318 }
4319 }
4320}
4321
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
/* op value 24 is unallocated (no entry in neon_3r_sizes => UNDEF) */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */

/* Each entry has bit n set if the corresponding 3-reg-same op allows
 * size value n; an op whose entry is zero (e.g. unallocated op values)
 * always UNDEFs.  0xf additionally permits size == 3 (64-bit elements).
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
};
4391
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.  Gaps in the numbering (3, 6-7, 21, 29, 39-43, 45,
 * 47-55) are unallocated op values.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63
4440
4441static int neon_2rm_is_float_op(int op)
4442{
4443 /* Return true if this neon 2reg-misc op is float-to-float */
4444 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4445 op >= NEON_2RM_VRECPE_F);
4446}
4447
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 * (0x4 entries are the float ops, which only accept size == 2.)
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
4498
9ee6e8bb
PB
4499/* Translate a NEON data processing instruction. Return nonzero if the
4500 instruction is invalid.
ad69471c
PB
4501 We process data in a mixture of 32-bit and 64-bit chunks.
4502 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4503
0ecb72a5 4504static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4505{
4506 int op;
4507 int q;
4508 int rd, rn, rm;
4509 int size;
4510 int shift;
4511 int pass;
4512 int count;
4513 int pairwise;
4514 int u;
ca9a32e4 4515 uint32_t imm, mask;
b75263d6 4516 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4517 TCGv_i64 tmp64;
9ee6e8bb 4518
5df8bac1 4519 if (!s->vfp_enabled)
9ee6e8bb
PB
4520 return 1;
4521 q = (insn & (1 << 6)) != 0;
4522 u = (insn >> 24) & 1;
4523 VFP_DREG_D(rd, insn);
4524 VFP_DREG_N(rn, insn);
4525 VFP_DREG_M(rm, insn);
4526 size = (insn >> 20) & 3;
4527 if ((insn & (1 << 23)) == 0) {
4528 /* Three register same length. */
4529 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4530 /* Catch invalid op and bad size combinations: UNDEF */
4531 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4532 return 1;
4533 }
25f84f79
PM
4534 /* All insns of this form UNDEF for either this condition or the
4535 * superset of cases "Q==1"; we catch the latter later.
4536 */
4537 if (q && ((rd | rn | rm) & 1)) {
4538 return 1;
4539 }
62698be3
PM
4540 if (size == 3 && op != NEON_3R_LOGIC) {
4541 /* 64-bit element instructions. */
9ee6e8bb 4542 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4543 neon_load_reg64(cpu_V0, rn + pass);
4544 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4545 switch (op) {
62698be3 4546 case NEON_3R_VQADD:
9ee6e8bb 4547 if (u) {
02da0b2d
PM
4548 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4549 cpu_V0, cpu_V1);
2c0262af 4550 } else {
02da0b2d
PM
4551 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4552 cpu_V0, cpu_V1);
2c0262af 4553 }
9ee6e8bb 4554 break;
62698be3 4555 case NEON_3R_VQSUB:
9ee6e8bb 4556 if (u) {
02da0b2d
PM
4557 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4558 cpu_V0, cpu_V1);
ad69471c 4559 } else {
02da0b2d
PM
4560 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4561 cpu_V0, cpu_V1);
ad69471c
PB
4562 }
4563 break;
62698be3 4564 case NEON_3R_VSHL:
ad69471c
PB
4565 if (u) {
4566 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4567 } else {
4568 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4569 }
4570 break;
62698be3 4571 case NEON_3R_VQSHL:
ad69471c 4572 if (u) {
02da0b2d
PM
4573 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4574 cpu_V1, cpu_V0);
ad69471c 4575 } else {
02da0b2d
PM
4576 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4577 cpu_V1, cpu_V0);
ad69471c
PB
4578 }
4579 break;
62698be3 4580 case NEON_3R_VRSHL:
ad69471c
PB
4581 if (u) {
4582 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4583 } else {
ad69471c
PB
4584 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4585 }
4586 break;
62698be3 4587 case NEON_3R_VQRSHL:
ad69471c 4588 if (u) {
02da0b2d
PM
4589 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4590 cpu_V1, cpu_V0);
ad69471c 4591 } else {
02da0b2d
PM
4592 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4593 cpu_V1, cpu_V0);
1e8d4eec 4594 }
9ee6e8bb 4595 break;
62698be3 4596 case NEON_3R_VADD_VSUB:
9ee6e8bb 4597 if (u) {
ad69471c 4598 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4599 } else {
ad69471c 4600 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4601 }
4602 break;
4603 default:
4604 abort();
2c0262af 4605 }
ad69471c 4606 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4607 }
9ee6e8bb 4608 return 0;
2c0262af 4609 }
25f84f79 4610 pairwise = 0;
9ee6e8bb 4611 switch (op) {
62698be3
PM
4612 case NEON_3R_VSHL:
4613 case NEON_3R_VQSHL:
4614 case NEON_3R_VRSHL:
4615 case NEON_3R_VQRSHL:
9ee6e8bb 4616 {
ad69471c
PB
4617 int rtmp;
4618 /* Shift instruction operands are reversed. */
4619 rtmp = rn;
9ee6e8bb 4620 rn = rm;
ad69471c 4621 rm = rtmp;
9ee6e8bb 4622 }
2c0262af 4623 break;
25f84f79
PM
4624 case NEON_3R_VPADD:
4625 if (u) {
4626 return 1;
4627 }
4628 /* Fall through */
62698be3
PM
4629 case NEON_3R_VPMAX:
4630 case NEON_3R_VPMIN:
9ee6e8bb 4631 pairwise = 1;
2c0262af 4632 break;
25f84f79
PM
4633 case NEON_3R_FLOAT_ARITH:
4634 pairwise = (u && size < 2); /* if VPADD (float) */
4635 break;
4636 case NEON_3R_FLOAT_MINMAX:
4637 pairwise = u; /* if VPMIN/VPMAX (float) */
4638 break;
4639 case NEON_3R_FLOAT_CMP:
4640 if (!u && size) {
4641 /* no encoding for U=0 C=1x */
4642 return 1;
4643 }
4644 break;
4645 case NEON_3R_FLOAT_ACMP:
4646 if (!u) {
4647 return 1;
4648 }
4649 break;
4650 case NEON_3R_VRECPS_VRSQRTS:
4651 if (u) {
4652 return 1;
4653 }
2c0262af 4654 break;
25f84f79
PM
4655 case NEON_3R_VMUL:
4656 if (u && (size != 0)) {
4657 /* UNDEF on invalid size for polynomial subcase */
4658 return 1;
4659 }
2c0262af 4660 break;
da97f52c
PM
4661 case NEON_3R_VFM:
4662 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4663 return 1;
4664 }
4665 break;
9ee6e8bb 4666 default:
2c0262af 4667 break;
9ee6e8bb 4668 }
dd8fbd78 4669
25f84f79
PM
4670 if (pairwise && q) {
4671 /* All the pairwise insns UNDEF if Q is set */
4672 return 1;
4673 }
4674
9ee6e8bb
PB
4675 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4676
4677 if (pairwise) {
4678 /* Pairwise. */
a5a14945
JR
4679 if (pass < 1) {
4680 tmp = neon_load_reg(rn, 0);
4681 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4682 } else {
a5a14945
JR
4683 tmp = neon_load_reg(rm, 0);
4684 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4685 }
4686 } else {
4687 /* Elementwise. */
dd8fbd78
FN
4688 tmp = neon_load_reg(rn, pass);
4689 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4690 }
4691 switch (op) {
62698be3 4692 case NEON_3R_VHADD:
9ee6e8bb
PB
4693 GEN_NEON_INTEGER_OP(hadd);
4694 break;
62698be3 4695 case NEON_3R_VQADD:
02da0b2d 4696 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4697 break;
62698be3 4698 case NEON_3R_VRHADD:
9ee6e8bb 4699 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4700 break;
62698be3 4701 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4702 switch ((u << 2) | size) {
4703 case 0: /* VAND */
dd8fbd78 4704 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4705 break;
4706 case 1: /* BIC */
f669df27 4707 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4708 break;
4709 case 2: /* VORR */
dd8fbd78 4710 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4711 break;
4712 case 3: /* VORN */
f669df27 4713 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4714 break;
4715 case 4: /* VEOR */
dd8fbd78 4716 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4717 break;
4718 case 5: /* VBSL */
dd8fbd78
FN
4719 tmp3 = neon_load_reg(rd, pass);
4720 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4721 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4722 break;
4723 case 6: /* VBIT */
dd8fbd78
FN
4724 tmp3 = neon_load_reg(rd, pass);
4725 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4726 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4727 break;
4728 case 7: /* VBIF */
dd8fbd78
FN
4729 tmp3 = neon_load_reg(rd, pass);
4730 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4731 tcg_temp_free_i32(tmp3);
9ee6e8bb 4732 break;
2c0262af
FB
4733 }
4734 break;
62698be3 4735 case NEON_3R_VHSUB:
9ee6e8bb
PB
4736 GEN_NEON_INTEGER_OP(hsub);
4737 break;
62698be3 4738 case NEON_3R_VQSUB:
02da0b2d 4739 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4740 break;
62698be3 4741 case NEON_3R_VCGT:
9ee6e8bb
PB
4742 GEN_NEON_INTEGER_OP(cgt);
4743 break;
62698be3 4744 case NEON_3R_VCGE:
9ee6e8bb
PB
4745 GEN_NEON_INTEGER_OP(cge);
4746 break;
62698be3 4747 case NEON_3R_VSHL:
ad69471c 4748 GEN_NEON_INTEGER_OP(shl);
2c0262af 4749 break;
62698be3 4750 case NEON_3R_VQSHL:
02da0b2d 4751 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4752 break;
62698be3 4753 case NEON_3R_VRSHL:
ad69471c 4754 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4755 break;
62698be3 4756 case NEON_3R_VQRSHL:
02da0b2d 4757 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4758 break;
62698be3 4759 case NEON_3R_VMAX:
9ee6e8bb
PB
4760 GEN_NEON_INTEGER_OP(max);
4761 break;
62698be3 4762 case NEON_3R_VMIN:
9ee6e8bb
PB
4763 GEN_NEON_INTEGER_OP(min);
4764 break;
62698be3 4765 case NEON_3R_VABD:
9ee6e8bb
PB
4766 GEN_NEON_INTEGER_OP(abd);
4767 break;
62698be3 4768 case NEON_3R_VABA:
9ee6e8bb 4769 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4770 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4771 tmp2 = neon_load_reg(rd, pass);
4772 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4773 break;
62698be3 4774 case NEON_3R_VADD_VSUB:
9ee6e8bb 4775 if (!u) { /* VADD */
62698be3 4776 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4777 } else { /* VSUB */
4778 switch (size) {
dd8fbd78
FN
4779 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4780 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4781 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4782 default: abort();
9ee6e8bb
PB
4783 }
4784 }
4785 break;
62698be3 4786 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4787 if (!u) { /* VTST */
4788 switch (size) {
dd8fbd78
FN
4789 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4790 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4791 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4792 default: abort();
9ee6e8bb
PB
4793 }
4794 } else { /* VCEQ */
4795 switch (size) {
dd8fbd78
FN
4796 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4797 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4798 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4799 default: abort();
9ee6e8bb
PB
4800 }
4801 }
4802 break;
62698be3 4803 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 4804 switch (size) {
dd8fbd78
FN
4805 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4806 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4807 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4808 default: abort();
9ee6e8bb 4809 }
7d1b0095 4810 tcg_temp_free_i32(tmp2);
dd8fbd78 4811 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4812 if (u) { /* VMLS */
dd8fbd78 4813 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4814 } else { /* VMLA */
dd8fbd78 4815 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4816 }
4817 break;
62698be3 4818 case NEON_3R_VMUL:
9ee6e8bb 4819 if (u) { /* polynomial */
dd8fbd78 4820 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4821 } else { /* Integer */
4822 switch (size) {
dd8fbd78
FN
4823 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4824 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4825 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4826 default: abort();
9ee6e8bb
PB
4827 }
4828 }
4829 break;
62698be3 4830 case NEON_3R_VPMAX:
9ee6e8bb
PB
4831 GEN_NEON_INTEGER_OP(pmax);
4832 break;
62698be3 4833 case NEON_3R_VPMIN:
9ee6e8bb
PB
4834 GEN_NEON_INTEGER_OP(pmin);
4835 break;
62698be3 4836 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4837 if (!u) { /* VQDMULH */
4838 switch (size) {
02da0b2d
PM
4839 case 1:
4840 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4841 break;
4842 case 2:
4843 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4844 break;
62698be3 4845 default: abort();
9ee6e8bb 4846 }
62698be3 4847 } else { /* VQRDMULH */
9ee6e8bb 4848 switch (size) {
02da0b2d
PM
4849 case 1:
4850 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4851 break;
4852 case 2:
4853 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4854 break;
62698be3 4855 default: abort();
9ee6e8bb
PB
4856 }
4857 }
4858 break;
62698be3 4859 case NEON_3R_VPADD:
9ee6e8bb 4860 switch (size) {
dd8fbd78
FN
4861 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4862 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4863 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4864 default: abort();
9ee6e8bb
PB
4865 }
4866 break;
62698be3 4867 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4868 {
4869 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4870 switch ((u << 2) | size) {
4871 case 0: /* VADD */
aa47cfdd
PM
4872 case 4: /* VPADD */
4873 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4874 break;
4875 case 2: /* VSUB */
aa47cfdd 4876 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4877 break;
4878 case 6: /* VABD */
aa47cfdd 4879 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4880 break;
4881 default:
62698be3 4882 abort();
9ee6e8bb 4883 }
aa47cfdd 4884 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4885 break;
aa47cfdd 4886 }
62698be3 4887 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4888 {
4889 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4890 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4891 if (!u) {
7d1b0095 4892 tcg_temp_free_i32(tmp2);
dd8fbd78 4893 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4894 if (size == 0) {
aa47cfdd 4895 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4896 } else {
aa47cfdd 4897 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4898 }
4899 }
aa47cfdd 4900 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4901 break;
aa47cfdd 4902 }
62698be3 4903 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4904 {
4905 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4906 if (!u) {
aa47cfdd 4907 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4908 } else {
aa47cfdd
PM
4909 if (size == 0) {
4910 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4911 } else {
4912 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4913 }
b5ff1b31 4914 }
aa47cfdd 4915 tcg_temp_free_ptr(fpstatus);
2c0262af 4916 break;
aa47cfdd 4917 }
62698be3 4918 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4919 {
4920 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4921 if (size == 0) {
4922 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4923 } else {
4924 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4925 }
4926 tcg_temp_free_ptr(fpstatus);
2c0262af 4927 break;
aa47cfdd 4928 }
62698be3 4929 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4930 {
4931 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4932 if (size == 0) {
4933 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4934 } else {
4935 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4936 }
4937 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4938 break;
aa47cfdd 4939 }
62698be3 4940 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4941 if (size == 0)
dd8fbd78 4942 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4943 else
dd8fbd78 4944 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4945 break;
da97f52c
PM
4946 case NEON_3R_VFM:
4947 {
4948 /* VFMA, VFMS: fused multiply-add */
4949 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4950 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4951 if (size) {
4952 /* VFMS */
4953 gen_helper_vfp_negs(tmp, tmp);
4954 }
4955 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4956 tcg_temp_free_i32(tmp3);
4957 tcg_temp_free_ptr(fpstatus);
4958 break;
4959 }
9ee6e8bb
PB
4960 default:
4961 abort();
2c0262af 4962 }
7d1b0095 4963 tcg_temp_free_i32(tmp2);
dd8fbd78 4964
9ee6e8bb
PB
4965 /* Save the result. For elementwise operations we can put it
4966 straight into the destination register. For pairwise operations
4967 we have to be careful to avoid clobbering the source operands. */
4968 if (pairwise && rd == rm) {
dd8fbd78 4969 neon_store_scratch(pass, tmp);
9ee6e8bb 4970 } else {
dd8fbd78 4971 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4972 }
4973
4974 } /* for pass */
4975 if (pairwise && rd == rm) {
4976 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4977 tmp = neon_load_scratch(pass);
4978 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4979 }
4980 }
ad69471c 4981 /* End of 3 register same size operations. */
9ee6e8bb
PB
4982 } else if (insn & (1 << 4)) {
4983 if ((insn & 0x00380080) != 0) {
4984 /* Two registers and shift. */
4985 op = (insn >> 8) & 0xf;
4986 if (insn & (1 << 7)) {
cc13115b
PM
4987 /* 64-bit shift. */
4988 if (op > 7) {
4989 return 1;
4990 }
9ee6e8bb
PB
4991 size = 3;
4992 } else {
4993 size = 2;
4994 while ((insn & (1 << (size + 19))) == 0)
4995 size--;
4996 }
4997 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 4998 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
4999 by immediate using the variable shift operations. */
5000 if (op < 8) {
5001 /* Shift by immediate:
5002 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5003 if (q && ((rd | rm) & 1)) {
5004 return 1;
5005 }
5006 if (!u && (op == 4 || op == 6)) {
5007 return 1;
5008 }
9ee6e8bb
PB
5009 /* Right shifts are encoded as N - shift, where N is the
5010 element size in bits. */
5011 if (op <= 4)
5012 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5013 if (size == 3) {
5014 count = q + 1;
5015 } else {
5016 count = q ? 4: 2;
5017 }
5018 switch (size) {
5019 case 0:
5020 imm = (uint8_t) shift;
5021 imm |= imm << 8;
5022 imm |= imm << 16;
5023 break;
5024 case 1:
5025 imm = (uint16_t) shift;
5026 imm |= imm << 16;
5027 break;
5028 case 2:
5029 case 3:
5030 imm = shift;
5031 break;
5032 default:
5033 abort();
5034 }
5035
5036 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5037 if (size == 3) {
5038 neon_load_reg64(cpu_V0, rm + pass);
5039 tcg_gen_movi_i64(cpu_V1, imm);
5040 switch (op) {
5041 case 0: /* VSHR */
5042 case 1: /* VSRA */
5043 if (u)
5044 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5045 else
ad69471c 5046 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5047 break;
ad69471c
PB
5048 case 2: /* VRSHR */
5049 case 3: /* VRSRA */
5050 if (u)
5051 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5052 else
ad69471c 5053 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5054 break;
ad69471c 5055 case 4: /* VSRI */
ad69471c
PB
5056 case 5: /* VSHL, VSLI */
5057 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5058 break;
0322b26e 5059 case 6: /* VQSHLU */
02da0b2d
PM
5060 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5061 cpu_V0, cpu_V1);
ad69471c 5062 break;
0322b26e
PM
5063 case 7: /* VQSHL */
5064 if (u) {
02da0b2d 5065 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5066 cpu_V0, cpu_V1);
5067 } else {
02da0b2d 5068 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5069 cpu_V0, cpu_V1);
5070 }
9ee6e8bb 5071 break;
9ee6e8bb 5072 }
ad69471c
PB
5073 if (op == 1 || op == 3) {
5074 /* Accumulate. */
5371cb81 5075 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5076 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5077 } else if (op == 4 || (op == 5 && u)) {
5078 /* Insert */
923e6509
CL
5079 neon_load_reg64(cpu_V1, rd + pass);
5080 uint64_t mask;
5081 if (shift < -63 || shift > 63) {
5082 mask = 0;
5083 } else {
5084 if (op == 4) {
5085 mask = 0xffffffffffffffffull >> -shift;
5086 } else {
5087 mask = 0xffffffffffffffffull << shift;
5088 }
5089 }
5090 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5091 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5092 }
5093 neon_store_reg64(cpu_V0, rd + pass);
5094 } else { /* size < 3 */
5095 /* Operands in T0 and T1. */
dd8fbd78 5096 tmp = neon_load_reg(rm, pass);
7d1b0095 5097 tmp2 = tcg_temp_new_i32();
dd8fbd78 5098 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5099 switch (op) {
5100 case 0: /* VSHR */
5101 case 1: /* VSRA */
5102 GEN_NEON_INTEGER_OP(shl);
5103 break;
5104 case 2: /* VRSHR */
5105 case 3: /* VRSRA */
5106 GEN_NEON_INTEGER_OP(rshl);
5107 break;
5108 case 4: /* VSRI */
ad69471c
PB
5109 case 5: /* VSHL, VSLI */
5110 switch (size) {
dd8fbd78
FN
5111 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5112 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5113 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5114 default: abort();
ad69471c
PB
5115 }
5116 break;
0322b26e 5117 case 6: /* VQSHLU */
ad69471c 5118 switch (size) {
0322b26e 5119 case 0:
02da0b2d
PM
5120 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5121 tmp, tmp2);
0322b26e
PM
5122 break;
5123 case 1:
02da0b2d
PM
5124 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5125 tmp, tmp2);
0322b26e
PM
5126 break;
5127 case 2:
02da0b2d
PM
5128 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5129 tmp, tmp2);
0322b26e
PM
5130 break;
5131 default:
cc13115b 5132 abort();
ad69471c
PB
5133 }
5134 break;
0322b26e 5135 case 7: /* VQSHL */
02da0b2d 5136 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5137 break;
ad69471c 5138 }
7d1b0095 5139 tcg_temp_free_i32(tmp2);
ad69471c
PB
5140
5141 if (op == 1 || op == 3) {
5142 /* Accumulate. */
dd8fbd78 5143 tmp2 = neon_load_reg(rd, pass);
5371cb81 5144 gen_neon_add(size, tmp, tmp2);
7d1b0095 5145 tcg_temp_free_i32(tmp2);
ad69471c
PB
5146 } else if (op == 4 || (op == 5 && u)) {
5147 /* Insert */
5148 switch (size) {
5149 case 0:
5150 if (op == 4)
ca9a32e4 5151 mask = 0xff >> -shift;
ad69471c 5152 else
ca9a32e4
JR
5153 mask = (uint8_t)(0xff << shift);
5154 mask |= mask << 8;
5155 mask |= mask << 16;
ad69471c
PB
5156 break;
5157 case 1:
5158 if (op == 4)
ca9a32e4 5159 mask = 0xffff >> -shift;
ad69471c 5160 else
ca9a32e4
JR
5161 mask = (uint16_t)(0xffff << shift);
5162 mask |= mask << 16;
ad69471c
PB
5163 break;
5164 case 2:
ca9a32e4
JR
5165 if (shift < -31 || shift > 31) {
5166 mask = 0;
5167 } else {
5168 if (op == 4)
5169 mask = 0xffffffffu >> -shift;
5170 else
5171 mask = 0xffffffffu << shift;
5172 }
ad69471c
PB
5173 break;
5174 default:
5175 abort();
5176 }
dd8fbd78 5177 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5178 tcg_gen_andi_i32(tmp, tmp, mask);
5179 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5180 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5181 tcg_temp_free_i32(tmp2);
ad69471c 5182 }
dd8fbd78 5183 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5184 }
5185 } /* for pass */
5186 } else if (op < 10) {
ad69471c 5187 /* Shift by immediate and narrow:
9ee6e8bb 5188 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5189 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5190 if (rm & 1) {
5191 return 1;
5192 }
9ee6e8bb
PB
5193 shift = shift - (1 << (size + 3));
5194 size++;
92cdfaeb 5195 if (size == 3) {
a7812ae4 5196 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5197 neon_load_reg64(cpu_V0, rm);
5198 neon_load_reg64(cpu_V1, rm + 1);
5199 for (pass = 0; pass < 2; pass++) {
5200 TCGv_i64 in;
5201 if (pass == 0) {
5202 in = cpu_V0;
5203 } else {
5204 in = cpu_V1;
5205 }
ad69471c 5206 if (q) {
0b36f4cd 5207 if (input_unsigned) {
92cdfaeb 5208 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5209 } else {
92cdfaeb 5210 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5211 }
ad69471c 5212 } else {
0b36f4cd 5213 if (input_unsigned) {
92cdfaeb 5214 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5215 } else {
92cdfaeb 5216 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5217 }
ad69471c 5218 }
7d1b0095 5219 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5220 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5221 neon_store_reg(rd, pass, tmp);
5222 } /* for pass */
5223 tcg_temp_free_i64(tmp64);
5224 } else {
5225 if (size == 1) {
5226 imm = (uint16_t)shift;
5227 imm |= imm << 16;
2c0262af 5228 } else {
92cdfaeb
PM
5229 /* size == 2 */
5230 imm = (uint32_t)shift;
5231 }
5232 tmp2 = tcg_const_i32(imm);
5233 tmp4 = neon_load_reg(rm + 1, 0);
5234 tmp5 = neon_load_reg(rm + 1, 1);
5235 for (pass = 0; pass < 2; pass++) {
5236 if (pass == 0) {
5237 tmp = neon_load_reg(rm, 0);
5238 } else {
5239 tmp = tmp4;
5240 }
0b36f4cd
CL
5241 gen_neon_shift_narrow(size, tmp, tmp2, q,
5242 input_unsigned);
92cdfaeb
PM
5243 if (pass == 0) {
5244 tmp3 = neon_load_reg(rm, 1);
5245 } else {
5246 tmp3 = tmp5;
5247 }
0b36f4cd
CL
5248 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5249 input_unsigned);
36aa55dc 5250 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5251 tcg_temp_free_i32(tmp);
5252 tcg_temp_free_i32(tmp3);
5253 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5254 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5255 neon_store_reg(rd, pass, tmp);
5256 } /* for pass */
c6067f04 5257 tcg_temp_free_i32(tmp2);
b75263d6 5258 }
9ee6e8bb 5259 } else if (op == 10) {
cc13115b
PM
5260 /* VSHLL, VMOVL */
5261 if (q || (rd & 1)) {
9ee6e8bb 5262 return 1;
cc13115b 5263 }
ad69471c
PB
5264 tmp = neon_load_reg(rm, 0);
5265 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5266 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5267 if (pass == 1)
5268 tmp = tmp2;
5269
5270 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5271
9ee6e8bb
PB
5272 if (shift != 0) {
5273 /* The shift is less than the width of the source
ad69471c
PB
5274 type, so we can just shift the whole register. */
5275 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5276 /* Widen the result of shift: we need to clear
5277 * the potential overflow bits resulting from
5278 * left bits of the narrow input appearing as
5279 * right bits of left the neighbour narrow
5280 * input. */
ad69471c
PB
5281 if (size < 2 || !u) {
5282 uint64_t imm64;
5283 if (size == 0) {
5284 imm = (0xffu >> (8 - shift));
5285 imm |= imm << 16;
acdf01ef 5286 } else if (size == 1) {
ad69471c 5287 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5288 } else {
5289 /* size == 2 */
5290 imm = 0xffffffff >> (32 - shift);
5291 }
5292 if (size < 2) {
5293 imm64 = imm | (((uint64_t)imm) << 32);
5294 } else {
5295 imm64 = imm;
9ee6e8bb 5296 }
acdf01ef 5297 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5298 }
5299 }
ad69471c 5300 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5301 }
f73534a5 5302 } else if (op >= 14) {
9ee6e8bb 5303 /* VCVT fixed-point. */
cc13115b
PM
5304 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5305 return 1;
5306 }
f73534a5
PM
5307 /* We have already masked out the must-be-1 top bit of imm6,
5308 * hence this 32-shift where the ARM ARM has 64-imm6.
5309 */
5310 shift = 32 - shift;
9ee6e8bb 5311 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5312 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5313 if (!(op & 1)) {
9ee6e8bb 5314 if (u)
5500b06c 5315 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5316 else
5500b06c 5317 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5318 } else {
5319 if (u)
5500b06c 5320 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5321 else
5500b06c 5322 gen_vfp_tosl(0, shift, 1);
2c0262af 5323 }
4373f3ce 5324 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5325 }
5326 } else {
9ee6e8bb
PB
5327 return 1;
5328 }
5329 } else { /* (insn & 0x00380080) == 0 */
5330 int invert;
7d80fee5
PM
5331 if (q && (rd & 1)) {
5332 return 1;
5333 }
9ee6e8bb
PB
5334
5335 op = (insn >> 8) & 0xf;
5336 /* One register and immediate. */
5337 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5338 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5339 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5340 * We choose to not special-case this and will behave as if a
5341 * valid constant encoding of 0 had been given.
5342 */
9ee6e8bb
PB
5343 switch (op) {
5344 case 0: case 1:
5345 /* no-op */
5346 break;
5347 case 2: case 3:
5348 imm <<= 8;
5349 break;
5350 case 4: case 5:
5351 imm <<= 16;
5352 break;
5353 case 6: case 7:
5354 imm <<= 24;
5355 break;
5356 case 8: case 9:
5357 imm |= imm << 16;
5358 break;
5359 case 10: case 11:
5360 imm = (imm << 8) | (imm << 24);
5361 break;
5362 case 12:
8e31209e 5363 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5364 break;
5365 case 13:
5366 imm = (imm << 16) | 0xffff;
5367 break;
5368 case 14:
5369 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5370 if (invert)
5371 imm = ~imm;
5372 break;
5373 case 15:
7d80fee5
PM
5374 if (invert) {
5375 return 1;
5376 }
9ee6e8bb
PB
5377 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5378 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5379 break;
5380 }
5381 if (invert)
5382 imm = ~imm;
5383
9ee6e8bb
PB
5384 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5385 if (op & 1 && op < 12) {
ad69471c 5386 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5387 if (invert) {
5388 /* The immediate value has already been inverted, so
5389 BIC becomes AND. */
ad69471c 5390 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5391 } else {
ad69471c 5392 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5393 }
9ee6e8bb 5394 } else {
ad69471c 5395 /* VMOV, VMVN. */
7d1b0095 5396 tmp = tcg_temp_new_i32();
9ee6e8bb 5397 if (op == 14 && invert) {
a5a14945 5398 int n;
ad69471c
PB
5399 uint32_t val;
5400 val = 0;
9ee6e8bb
PB
5401 for (n = 0; n < 4; n++) {
5402 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5403 val |= 0xff << (n * 8);
9ee6e8bb 5404 }
ad69471c
PB
5405 tcg_gen_movi_i32(tmp, val);
5406 } else {
5407 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5408 }
9ee6e8bb 5409 }
ad69471c 5410 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5411 }
5412 }
e4b3861d 5413 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5414 if (size != 3) {
5415 op = (insn >> 8) & 0xf;
5416 if ((insn & (1 << 6)) == 0) {
5417 /* Three registers of different lengths. */
5418 int src1_wide;
5419 int src2_wide;
5420 int prewiden;
695272dc
PM
5421 /* undefreq: bit 0 : UNDEF if size != 0
5422 * bit 1 : UNDEF if size == 0
5423 * bit 2 : UNDEF if U == 1
5424 * Note that [1:0] set implies 'always UNDEF'
5425 */
5426 int undefreq;
5427 /* prewiden, src1_wide, src2_wide, undefreq */
5428 static const int neon_3reg_wide[16][4] = {
5429 {1, 0, 0, 0}, /* VADDL */
5430 {1, 1, 0, 0}, /* VADDW */
5431 {1, 0, 0, 0}, /* VSUBL */
5432 {1, 1, 0, 0}, /* VSUBW */
5433 {0, 1, 1, 0}, /* VADDHN */
5434 {0, 0, 0, 0}, /* VABAL */
5435 {0, 1, 1, 0}, /* VSUBHN */
5436 {0, 0, 0, 0}, /* VABDL */
5437 {0, 0, 0, 0}, /* VMLAL */
5438 {0, 0, 0, 6}, /* VQDMLAL */
5439 {0, 0, 0, 0}, /* VMLSL */
5440 {0, 0, 0, 6}, /* VQDMLSL */
5441 {0, 0, 0, 0}, /* Integer VMULL */
5442 {0, 0, 0, 2}, /* VQDMULL */
5443 {0, 0, 0, 5}, /* Polynomial VMULL */
5444 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5445 };
5446
5447 prewiden = neon_3reg_wide[op][0];
5448 src1_wide = neon_3reg_wide[op][1];
5449 src2_wide = neon_3reg_wide[op][2];
695272dc 5450 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5451
695272dc
PM
5452 if (((undefreq & 1) && (size != 0)) ||
5453 ((undefreq & 2) && (size == 0)) ||
5454 ((undefreq & 4) && u)) {
5455 return 1;
5456 }
5457 if ((src1_wide && (rn & 1)) ||
5458 (src2_wide && (rm & 1)) ||
5459 (!src2_wide && (rd & 1))) {
ad69471c 5460 return 1;
695272dc 5461 }
ad69471c 5462
9ee6e8bb
PB
5463 /* Avoid overlapping operands. Wide source operands are
5464 always aligned so will never overlap with wide
5465 destinations in problematic ways. */
8f8e3aa4 5466 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5467 tmp = neon_load_reg(rm, 1);
5468 neon_store_scratch(2, tmp);
8f8e3aa4 5469 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5470 tmp = neon_load_reg(rn, 1);
5471 neon_store_scratch(2, tmp);
9ee6e8bb 5472 }
a50f5b91 5473 TCGV_UNUSED(tmp3);
9ee6e8bb 5474 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5475 if (src1_wide) {
5476 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 5477 TCGV_UNUSED(tmp);
9ee6e8bb 5478 } else {
ad69471c 5479 if (pass == 1 && rd == rn) {
dd8fbd78 5480 tmp = neon_load_scratch(2);
9ee6e8bb 5481 } else {
ad69471c
PB
5482 tmp = neon_load_reg(rn, pass);
5483 }
5484 if (prewiden) {
5485 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5486 }
5487 }
ad69471c
PB
5488 if (src2_wide) {
5489 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 5490 TCGV_UNUSED(tmp2);
9ee6e8bb 5491 } else {
ad69471c 5492 if (pass == 1 && rd == rm) {
dd8fbd78 5493 tmp2 = neon_load_scratch(2);
9ee6e8bb 5494 } else {
ad69471c
PB
5495 tmp2 = neon_load_reg(rm, pass);
5496 }
5497 if (prewiden) {
5498 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5499 }
9ee6e8bb
PB
5500 }
5501 switch (op) {
5502 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5503 gen_neon_addl(size);
9ee6e8bb 5504 break;
79b0e534 5505 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5506 gen_neon_subl(size);
9ee6e8bb
PB
5507 break;
5508 case 5: case 7: /* VABAL, VABDL */
5509 switch ((size << 1) | u) {
ad69471c
PB
5510 case 0:
5511 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5512 break;
5513 case 1:
5514 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5515 break;
5516 case 2:
5517 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5518 break;
5519 case 3:
5520 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5521 break;
5522 case 4:
5523 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5524 break;
5525 case 5:
5526 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5527 break;
9ee6e8bb
PB
5528 default: abort();
5529 }
7d1b0095
PM
5530 tcg_temp_free_i32(tmp2);
5531 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5532 break;
5533 case 8: case 9: case 10: case 11: case 12: case 13:
5534 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5535 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5536 break;
5537 case 14: /* Polynomial VMULL */
e5ca24cb 5538 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5539 tcg_temp_free_i32(tmp2);
5540 tcg_temp_free_i32(tmp);
e5ca24cb 5541 break;
695272dc
PM
5542 default: /* 15 is RESERVED: caught earlier */
5543 abort();
9ee6e8bb 5544 }
ebcd88ce
PM
5545 if (op == 13) {
5546 /* VQDMULL */
5547 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5548 neon_store_reg64(cpu_V0, rd + pass);
5549 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5550 /* Accumulate. */
ebcd88ce 5551 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5552 switch (op) {
4dc064e6
PM
5553 case 10: /* VMLSL */
5554 gen_neon_negl(cpu_V0, size);
5555 /* Fall through */
5556 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5557 gen_neon_addl(size);
9ee6e8bb
PB
5558 break;
5559 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5560 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5561 if (op == 11) {
5562 gen_neon_negl(cpu_V0, size);
5563 }
ad69471c
PB
5564 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5565 break;
9ee6e8bb
PB
5566 default:
5567 abort();
5568 }
ad69471c 5569 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5570 } else if (op == 4 || op == 6) {
5571 /* Narrowing operation. */
7d1b0095 5572 tmp = tcg_temp_new_i32();
79b0e534 5573 if (!u) {
9ee6e8bb 5574 switch (size) {
ad69471c
PB
5575 case 0:
5576 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5577 break;
5578 case 1:
5579 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5580 break;
5581 case 2:
5582 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5583 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5584 break;
9ee6e8bb
PB
5585 default: abort();
5586 }
5587 } else {
5588 switch (size) {
ad69471c
PB
5589 case 0:
5590 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5591 break;
5592 case 1:
5593 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5594 break;
5595 case 2:
5596 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5597 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5598 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5599 break;
9ee6e8bb
PB
5600 default: abort();
5601 }
5602 }
ad69471c
PB
5603 if (pass == 0) {
5604 tmp3 = tmp;
5605 } else {
5606 neon_store_reg(rd, 0, tmp3);
5607 neon_store_reg(rd, 1, tmp);
5608 }
9ee6e8bb
PB
5609 } else {
5610 /* Write back the result. */
ad69471c 5611 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5612 }
5613 }
5614 } else {
3e3326df
PM
5615 /* Two registers and a scalar. NB that for ops of this form
5616 * the ARM ARM labels bit 24 as Q, but it is in our variable
5617 * 'u', not 'q'.
5618 */
5619 if (size == 0) {
5620 return 1;
5621 }
9ee6e8bb 5622 switch (op) {
9ee6e8bb 5623 case 1: /* Float VMLA scalar */
9ee6e8bb 5624 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5625 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5626 if (size == 1) {
5627 return 1;
5628 }
5629 /* fall through */
5630 case 0: /* Integer VMLA scalar */
5631 case 4: /* Integer VMLS scalar */
5632 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5633 case 12: /* VQDMULH scalar */
5634 case 13: /* VQRDMULH scalar */
3e3326df
PM
5635 if (u && ((rd | rn) & 1)) {
5636 return 1;
5637 }
dd8fbd78
FN
5638 tmp = neon_get_scalar(size, rm);
5639 neon_store_scratch(0, tmp);
9ee6e8bb 5640 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5641 tmp = neon_load_scratch(0);
5642 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5643 if (op == 12) {
5644 if (size == 1) {
02da0b2d 5645 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5646 } else {
02da0b2d 5647 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5648 }
5649 } else if (op == 13) {
5650 if (size == 1) {
02da0b2d 5651 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5652 } else {
02da0b2d 5653 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5654 }
5655 } else if (op & 1) {
aa47cfdd
PM
5656 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5657 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5658 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5659 } else {
5660 switch (size) {
dd8fbd78
FN
5661 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5662 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5663 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5664 default: abort();
9ee6e8bb
PB
5665 }
5666 }
7d1b0095 5667 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5668 if (op < 8) {
5669 /* Accumulate. */
dd8fbd78 5670 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5671 switch (op) {
5672 case 0:
dd8fbd78 5673 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5674 break;
5675 case 1:
aa47cfdd
PM
5676 {
5677 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5678 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5679 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5680 break;
aa47cfdd 5681 }
9ee6e8bb 5682 case 4:
dd8fbd78 5683 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5684 break;
5685 case 5:
aa47cfdd
PM
5686 {
5687 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5688 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5689 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5690 break;
aa47cfdd 5691 }
9ee6e8bb
PB
5692 default:
5693 abort();
5694 }
7d1b0095 5695 tcg_temp_free_i32(tmp2);
9ee6e8bb 5696 }
dd8fbd78 5697 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5698 }
5699 break;
9ee6e8bb 5700 case 3: /* VQDMLAL scalar */
9ee6e8bb 5701 case 7: /* VQDMLSL scalar */
9ee6e8bb 5702 case 11: /* VQDMULL scalar */
3e3326df 5703 if (u == 1) {
ad69471c 5704 return 1;
3e3326df
PM
5705 }
5706 /* fall through */
5707 case 2: /* VMLAL sclar */
5708 case 6: /* VMLSL scalar */
5709 case 10: /* VMULL scalar */
5710 if (rd & 1) {
5711 return 1;
5712 }
dd8fbd78 5713 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5714 /* We need a copy of tmp2 because gen_neon_mull
5715 * deletes it during pass 0. */
7d1b0095 5716 tmp4 = tcg_temp_new_i32();
c6067f04 5717 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5718 tmp3 = neon_load_reg(rn, 1);
ad69471c 5719
9ee6e8bb 5720 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5721 if (pass == 0) {
5722 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5723 } else {
dd8fbd78 5724 tmp = tmp3;
c6067f04 5725 tmp2 = tmp4;
9ee6e8bb 5726 }
ad69471c 5727 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5728 if (op != 11) {
5729 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5730 }
9ee6e8bb 5731 switch (op) {
4dc064e6
PM
5732 case 6:
5733 gen_neon_negl(cpu_V0, size);
5734 /* Fall through */
5735 case 2:
ad69471c 5736 gen_neon_addl(size);
9ee6e8bb
PB
5737 break;
5738 case 3: case 7:
ad69471c 5739 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5740 if (op == 7) {
5741 gen_neon_negl(cpu_V0, size);
5742 }
ad69471c 5743 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5744 break;
5745 case 10:
5746 /* no-op */
5747 break;
5748 case 11:
ad69471c 5749 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5750 break;
5751 default:
5752 abort();
5753 }
ad69471c 5754 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5755 }
dd8fbd78 5756
dd8fbd78 5757
9ee6e8bb
PB
5758 break;
5759 default: /* 14 and 15 are RESERVED */
5760 return 1;
5761 }
5762 }
5763 } else { /* size == 3 */
5764 if (!u) {
5765 /* Extract. */
9ee6e8bb 5766 imm = (insn >> 8) & 0xf;
ad69471c
PB
5767
5768 if (imm > 7 && !q)
5769 return 1;
5770
52579ea1
PM
5771 if (q && ((rd | rn | rm) & 1)) {
5772 return 1;
5773 }
5774
ad69471c
PB
5775 if (imm == 0) {
5776 neon_load_reg64(cpu_V0, rn);
5777 if (q) {
5778 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5779 }
ad69471c
PB
5780 } else if (imm == 8) {
5781 neon_load_reg64(cpu_V0, rn + 1);
5782 if (q) {
5783 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5784 }
ad69471c 5785 } else if (q) {
a7812ae4 5786 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5787 if (imm < 8) {
5788 neon_load_reg64(cpu_V0, rn);
a7812ae4 5789 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5790 } else {
5791 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5792 neon_load_reg64(tmp64, rm);
ad69471c
PB
5793 }
5794 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5795 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5796 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5797 if (imm < 8) {
5798 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5799 } else {
ad69471c
PB
5800 neon_load_reg64(cpu_V1, rm + 1);
5801 imm -= 8;
9ee6e8bb 5802 }
ad69471c 5803 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5804 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5805 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5806 tcg_temp_free_i64(tmp64);
ad69471c 5807 } else {
a7812ae4 5808 /* BUGFIX */
ad69471c 5809 neon_load_reg64(cpu_V0, rn);
a7812ae4 5810 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5811 neon_load_reg64(cpu_V1, rm);
a7812ae4 5812 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5813 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5814 }
5815 neon_store_reg64(cpu_V0, rd);
5816 if (q) {
5817 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5818 }
5819 } else if ((insn & (1 << 11)) == 0) {
5820 /* Two register misc. */
5821 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5822 size = (insn >> 18) & 3;
600b828c
PM
5823 /* UNDEF for unknown op values and bad op-size combinations */
5824 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5825 return 1;
5826 }
fc2a9b37
PM
5827 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5828 q && ((rm | rd) & 1)) {
5829 return 1;
5830 }
9ee6e8bb 5831 switch (op) {
600b828c 5832 case NEON_2RM_VREV64:
9ee6e8bb 5833 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5834 tmp = neon_load_reg(rm, pass * 2);
5835 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5836 switch (size) {
dd8fbd78
FN
5837 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5838 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5839 case 2: /* no-op */ break;
5840 default: abort();
5841 }
dd8fbd78 5842 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5843 if (size == 2) {
dd8fbd78 5844 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5845 } else {
9ee6e8bb 5846 switch (size) {
dd8fbd78
FN
5847 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5848 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5849 default: abort();
5850 }
dd8fbd78 5851 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5852 }
5853 }
5854 break;
600b828c
PM
5855 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5856 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5857 for (pass = 0; pass < q + 1; pass++) {
5858 tmp = neon_load_reg(rm, pass * 2);
5859 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5860 tmp = neon_load_reg(rm, pass * 2 + 1);
5861 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5862 switch (size) {
5863 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5864 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5865 case 2: tcg_gen_add_i64(CPU_V001); break;
5866 default: abort();
5867 }
600b828c 5868 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5869 /* Accumulate. */
ad69471c
PB
5870 neon_load_reg64(cpu_V1, rd + pass);
5871 gen_neon_addl(size);
9ee6e8bb 5872 }
ad69471c 5873 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5874 }
5875 break;
600b828c 5876 case NEON_2RM_VTRN:
9ee6e8bb 5877 if (size == 2) {
a5a14945 5878 int n;
9ee6e8bb 5879 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5880 tmp = neon_load_reg(rm, n);
5881 tmp2 = neon_load_reg(rd, n + 1);
5882 neon_store_reg(rm, n, tmp2);
5883 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5884 }
5885 } else {
5886 goto elementwise;
5887 }
5888 break;
600b828c 5889 case NEON_2RM_VUZP:
02acedf9 5890 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5891 return 1;
9ee6e8bb
PB
5892 }
5893 break;
600b828c 5894 case NEON_2RM_VZIP:
d68a6f3a 5895 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5896 return 1;
9ee6e8bb
PB
5897 }
5898 break;
600b828c
PM
5899 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5900 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5901 if (rm & 1) {
5902 return 1;
5903 }
a50f5b91 5904 TCGV_UNUSED(tmp2);
9ee6e8bb 5905 for (pass = 0; pass < 2; pass++) {
ad69471c 5906 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5907 tmp = tcg_temp_new_i32();
600b828c
PM
5908 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5909 tmp, cpu_V0);
ad69471c
PB
5910 if (pass == 0) {
5911 tmp2 = tmp;
5912 } else {
5913 neon_store_reg(rd, 0, tmp2);
5914 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5915 }
9ee6e8bb
PB
5916 }
5917 break;
600b828c 5918 case NEON_2RM_VSHLL:
fc2a9b37 5919 if (q || (rd & 1)) {
9ee6e8bb 5920 return 1;
600b828c 5921 }
ad69471c
PB
5922 tmp = neon_load_reg(rm, 0);
5923 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5924 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5925 if (pass == 1)
5926 tmp = tmp2;
5927 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5928 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5929 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5930 }
5931 break;
600b828c 5932 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5933 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5934 q || (rm & 1)) {
5935 return 1;
5936 }
7d1b0095
PM
5937 tmp = tcg_temp_new_i32();
5938 tmp2 = tcg_temp_new_i32();
60011498 5939 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5940 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5941 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5942 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5943 tcg_gen_shli_i32(tmp2, tmp2, 16);
5944 tcg_gen_or_i32(tmp2, tmp2, tmp);
5945 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5946 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5947 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5948 neon_store_reg(rd, 0, tmp2);
7d1b0095 5949 tmp2 = tcg_temp_new_i32();
2d981da7 5950 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5951 tcg_gen_shli_i32(tmp2, tmp2, 16);
5952 tcg_gen_or_i32(tmp2, tmp2, tmp);
5953 neon_store_reg(rd, 1, tmp2);
7d1b0095 5954 tcg_temp_free_i32(tmp);
60011498 5955 break;
600b828c 5956 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5957 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5958 q || (rd & 1)) {
5959 return 1;
5960 }
7d1b0095 5961 tmp3 = tcg_temp_new_i32();
60011498
PB
5962 tmp = neon_load_reg(rm, 0);
5963 tmp2 = neon_load_reg(rm, 1);
5964 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5965 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5966 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5967 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5968 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5969 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5970 tcg_temp_free_i32(tmp);
60011498 5971 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5972 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5973 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5974 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5975 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5976 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5977 tcg_temp_free_i32(tmp2);
5978 tcg_temp_free_i32(tmp3);
60011498 5979 break;
9ee6e8bb
PB
5980 default:
5981 elementwise:
5982 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5983 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5984 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5985 neon_reg_offset(rm, pass));
dd8fbd78 5986 TCGV_UNUSED(tmp);
9ee6e8bb 5987 } else {
dd8fbd78 5988 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5989 }
5990 switch (op) {
600b828c 5991 case NEON_2RM_VREV32:
9ee6e8bb 5992 switch (size) {
dd8fbd78
FN
5993 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5994 case 1: gen_swap_half(tmp); break;
600b828c 5995 default: abort();
9ee6e8bb
PB
5996 }
5997 break;
600b828c 5998 case NEON_2RM_VREV16:
dd8fbd78 5999 gen_rev16(tmp);
9ee6e8bb 6000 break;
600b828c 6001 case NEON_2RM_VCLS:
9ee6e8bb 6002 switch (size) {
dd8fbd78
FN
6003 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6004 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6005 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6006 default: abort();
9ee6e8bb
PB
6007 }
6008 break;
600b828c 6009 case NEON_2RM_VCLZ:
9ee6e8bb 6010 switch (size) {
dd8fbd78
FN
6011 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6012 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6013 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6014 default: abort();
9ee6e8bb
PB
6015 }
6016 break;
600b828c 6017 case NEON_2RM_VCNT:
dd8fbd78 6018 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6019 break;
600b828c 6020 case NEON_2RM_VMVN:
dd8fbd78 6021 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6022 break;
600b828c 6023 case NEON_2RM_VQABS:
9ee6e8bb 6024 switch (size) {
02da0b2d
PM
6025 case 0:
6026 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6027 break;
6028 case 1:
6029 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6030 break;
6031 case 2:
6032 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6033 break;
600b828c 6034 default: abort();
9ee6e8bb
PB
6035 }
6036 break;
600b828c 6037 case NEON_2RM_VQNEG:
9ee6e8bb 6038 switch (size) {
02da0b2d
PM
6039 case 0:
6040 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6041 break;
6042 case 1:
6043 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6044 break;
6045 case 2:
6046 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6047 break;
600b828c 6048 default: abort();
9ee6e8bb
PB
6049 }
6050 break;
600b828c 6051 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6052 tmp2 = tcg_const_i32(0);
9ee6e8bb 6053 switch(size) {
dd8fbd78
FN
6054 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6055 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6056 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6057 default: abort();
9ee6e8bb 6058 }
dd8fbd78 6059 tcg_temp_free(tmp2);
600b828c 6060 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6061 tcg_gen_not_i32(tmp, tmp);
600b828c 6062 }
9ee6e8bb 6063 break;
600b828c 6064 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6065 tmp2 = tcg_const_i32(0);
9ee6e8bb 6066 switch(size) {
dd8fbd78
FN
6067 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6068 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6069 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6070 default: abort();
9ee6e8bb 6071 }
dd8fbd78 6072 tcg_temp_free(tmp2);
600b828c 6073 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6074 tcg_gen_not_i32(tmp, tmp);
600b828c 6075 }
9ee6e8bb 6076 break;
600b828c 6077 case NEON_2RM_VCEQ0:
dd8fbd78 6078 tmp2 = tcg_const_i32(0);
9ee6e8bb 6079 switch(size) {
dd8fbd78
FN
6080 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6081 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6082 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6083 default: abort();
9ee6e8bb 6084 }
dd8fbd78 6085 tcg_temp_free(tmp2);
9ee6e8bb 6086 break;
600b828c 6087 case NEON_2RM_VABS:
9ee6e8bb 6088 switch(size) {
dd8fbd78
FN
6089 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6090 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6091 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6092 default: abort();
9ee6e8bb
PB
6093 }
6094 break;
600b828c 6095 case NEON_2RM_VNEG:
dd8fbd78
FN
6096 tmp2 = tcg_const_i32(0);
6097 gen_neon_rsb(size, tmp, tmp2);
6098 tcg_temp_free(tmp2);
9ee6e8bb 6099 break;
600b828c 6100 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6101 {
6102 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6103 tmp2 = tcg_const_i32(0);
aa47cfdd 6104 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6105 tcg_temp_free(tmp2);
aa47cfdd 6106 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6107 break;
aa47cfdd 6108 }
600b828c 6109 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6110 {
6111 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6112 tmp2 = tcg_const_i32(0);
aa47cfdd 6113 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6114 tcg_temp_free(tmp2);
aa47cfdd 6115 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6116 break;
aa47cfdd 6117 }
600b828c 6118 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6119 {
6120 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6121 tmp2 = tcg_const_i32(0);
aa47cfdd 6122 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
dd8fbd78 6123 tcg_temp_free(tmp2);
aa47cfdd 6124 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6125 break;
aa47cfdd 6126 }
600b828c 6127 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6128 {
6129 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6130 tmp2 = tcg_const_i32(0);
aa47cfdd 6131 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6132 tcg_temp_free(tmp2);
aa47cfdd 6133 tcg_temp_free_ptr(fpstatus);
0e326109 6134 break;
aa47cfdd 6135 }
600b828c 6136 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6137 {
6138 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6139 tmp2 = tcg_const_i32(0);
aa47cfdd 6140 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
0e326109 6141 tcg_temp_free(tmp2);
aa47cfdd 6142 tcg_temp_free_ptr(fpstatus);
0e326109 6143 break;
aa47cfdd 6144 }
600b828c 6145 case NEON_2RM_VABS_F:
4373f3ce 6146 gen_vfp_abs(0);
9ee6e8bb 6147 break;
600b828c 6148 case NEON_2RM_VNEG_F:
4373f3ce 6149 gen_vfp_neg(0);
9ee6e8bb 6150 break;
600b828c 6151 case NEON_2RM_VSWP:
dd8fbd78
FN
6152 tmp2 = neon_load_reg(rd, pass);
6153 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6154 break;
600b828c 6155 case NEON_2RM_VTRN:
dd8fbd78 6156 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6157 switch (size) {
dd8fbd78
FN
6158 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6159 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6160 default: abort();
9ee6e8bb 6161 }
dd8fbd78 6162 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6163 break;
600b828c 6164 case NEON_2RM_VRECPE:
dd8fbd78 6165 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6166 break;
600b828c 6167 case NEON_2RM_VRSQRTE:
dd8fbd78 6168 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6169 break;
600b828c 6170 case NEON_2RM_VRECPE_F:
4373f3ce 6171 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6172 break;
600b828c 6173 case NEON_2RM_VRSQRTE_F:
4373f3ce 6174 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6175 break;
600b828c 6176 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6177 gen_vfp_sito(0, 1);
9ee6e8bb 6178 break;
600b828c 6179 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6180 gen_vfp_uito(0, 1);
9ee6e8bb 6181 break;
600b828c 6182 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6183 gen_vfp_tosiz(0, 1);
9ee6e8bb 6184 break;
600b828c 6185 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6186 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6187 break;
6188 default:
600b828c
PM
6189 /* Reserved op values were caught by the
6190 * neon_2rm_sizes[] check earlier.
6191 */
6192 abort();
9ee6e8bb 6193 }
600b828c 6194 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6195 tcg_gen_st_f32(cpu_F0s, cpu_env,
6196 neon_reg_offset(rd, pass));
9ee6e8bb 6197 } else {
dd8fbd78 6198 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6199 }
6200 }
6201 break;
6202 }
6203 } else if ((insn & (1 << 10)) == 0) {
6204 /* VTBL, VTBX. */
56907d77
PM
6205 int n = ((insn >> 8) & 3) + 1;
6206 if ((rn + n) > 32) {
6207 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6208 * helper function running off the end of the register file.
6209 */
6210 return 1;
6211 }
6212 n <<= 3;
9ee6e8bb 6213 if (insn & (1 << 6)) {
8f8e3aa4 6214 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6215 } else {
7d1b0095 6216 tmp = tcg_temp_new_i32();
8f8e3aa4 6217 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6218 }
8f8e3aa4 6219 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6220 tmp4 = tcg_const_i32(rn);
6221 tmp5 = tcg_const_i32(n);
9ef39277 6222 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6223 tcg_temp_free_i32(tmp);
9ee6e8bb 6224 if (insn & (1 << 6)) {
8f8e3aa4 6225 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6226 } else {
7d1b0095 6227 tmp = tcg_temp_new_i32();
8f8e3aa4 6228 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6229 }
8f8e3aa4 6230 tmp3 = neon_load_reg(rm, 1);
9ef39277 6231 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6232 tcg_temp_free_i32(tmp5);
6233 tcg_temp_free_i32(tmp4);
8f8e3aa4 6234 neon_store_reg(rd, 0, tmp2);
3018f259 6235 neon_store_reg(rd, 1, tmp3);
7d1b0095 6236 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6237 } else if ((insn & 0x380) == 0) {
6238 /* VDUP */
133da6aa
JR
6239 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6240 return 1;
6241 }
9ee6e8bb 6242 if (insn & (1 << 19)) {
dd8fbd78 6243 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6244 } else {
dd8fbd78 6245 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6246 }
6247 if (insn & (1 << 16)) {
dd8fbd78 6248 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6249 } else if (insn & (1 << 17)) {
6250 if ((insn >> 18) & 1)
dd8fbd78 6251 gen_neon_dup_high16(tmp);
9ee6e8bb 6252 else
dd8fbd78 6253 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6254 }
6255 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6256 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6257 tcg_gen_mov_i32(tmp2, tmp);
6258 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6259 }
7d1b0095 6260 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6261 } else {
6262 return 1;
6263 }
6264 }
6265 }
6266 return 0;
6267}
6268
/* Decode a coprocessor instruction (MRC/MCR/MRRC/MCRR/CDP and friends).
 * Returns 0 if the instruction was handled, 1 if it should UNDEF.
 *
 * Coprocessors 0/1 (iwMMXt/XScale DSP) and 10/11 (VFP/Neon) are real
 * instruction spaces and are dispatched to their own decoders; anything
 * else is treated as an access to a registered coprocessor register
 * via the ARMCPRegInfo hashtable.
 */
static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpnum = (insn >> 8) & 0xf;
    /* On XScale, cp15.c15_cpar gates which coprocessor numbers are
     * accessible; a clear bit means the access UNDEFs.
     */
    if (arm_feature(env, ARM_FEATURE_XSCALE)
	    && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
	return 1;

    /* First check for coprocessor space used for actual instructions */
    switch (cpnum) {
      case 0:
      case 1:
	if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
	    return disas_iwmmxt_insn(env, s, insn);
	} else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
	    return disas_dsp_insn(env, s, insn);
	}
	return 1;
    case 10:
    case 11:
	return disas_vfp_insn (env, s, insn);
    default:
        break;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Extract the register-identifying fields.  64-bit (MRRC/MCRR)
     * accesses have no crn/opc2 and carry a second transfer register.
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    ri = get_arm_cp_reginfo(cpu,
            ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(env, ri, isread)) {
            return 1;
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            /* Sync the PC and stop translation so the CPU can sleep. */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                /* The source of the 64-bit value is, in priority order:
                 * a constant, a read callback, or a direct field load.
                 */
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    /* readfn may raise an exception; PC must be in sync. */
                    gen_set_pc_im(s->pc);
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into the rt (low) / rt2 (high)
                 * register pair; store_reg consumes each temp.
                 */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                /* Assemble rt (low) : rt2 (high) into one 64-bit value. */
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    /* writefn may raise an exception; PC must be in sync. */
                    gen_set_pc_im(s->pc);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv tmp;
                    TCGv_ptr tmpptr;
                    gen_set_pc_im(s->pc);
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
                gen_lookup_tb(s);
            }
        }
        return 0;
    }

    /* No matching register definition: UNDEF. */
    return 1;
}
6444
5e3f878a
PB
6445
6446/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6447static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
6448{
6449 TCGv tmp;
7d1b0095 6450 tmp = tcg_temp_new_i32();
5e3f878a
PB
6451 tcg_gen_trunc_i64_i32(tmp, val);
6452 store_reg(s, rlow, tmp);
7d1b0095 6453 tmp = tcg_temp_new_i32();
5e3f878a
PB
6454 tcg_gen_shri_i64(val, val, 32);
6455 tcg_gen_trunc_i64_i32(tmp, val);
6456 store_reg(s, rhigh, tmp);
6457}
6458
6459/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6460static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6461{
a7812ae4 6462 TCGv_i64 tmp;
5e3f878a
PB
6463 TCGv tmp2;
6464
36aa55dc 6465 /* Load value and extend to 64 bits. */
a7812ae4 6466 tmp = tcg_temp_new_i64();
5e3f878a
PB
6467 tmp2 = load_reg(s, rlow);
6468 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6469 tcg_temp_free_i32(tmp2);
5e3f878a 6470 tcg_gen_add_i64(val, val, tmp);
b75263d6 6471 tcg_temp_free_i64(tmp);
5e3f878a
PB
6472}
6473
6474/* load and add a 64-bit value from a register pair. */
a7812ae4 6475static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6476{
a7812ae4 6477 TCGv_i64 tmp;
36aa55dc
PB
6478 TCGv tmpl;
6479 TCGv tmph;
5e3f878a
PB
6480
6481 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6482 tmpl = load_reg(s, rlow);
6483 tmph = load_reg(s, rhigh);
a7812ae4 6484 tmp = tcg_temp_new_i64();
36aa55dc 6485 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6486 tcg_temp_free_i32(tmpl);
6487 tcg_temp_free_i32(tmph);
5e3f878a 6488 tcg_gen_add_i64(val, val, tmp);
b75263d6 6489 tcg_temp_free_i64(tmp);
5e3f878a
PB
6490}
6491
c9f10124
RH
6492/* Set N and Z flags from hi|lo. */
6493static void gen_logicq_cc(TCGv lo, TCGv hi)
5e3f878a 6494{
c9f10124
RH
6495 tcg_gen_mov_i32(cpu_NF, hi);
6496 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6497}
6498
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic.  In user emulation mode we
   throw an exception and handle the atomic operation elsewhere.  */
/* Emit code for LDREX{B,H,D}/LDREX: load from addr into rt (and rt2 for
 * the doubleword form, size == 3), and record addr/value in the global
 * exclusive-monitor state for the matching STREX to check.
 * size: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword.
 */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv addr, int size)
{
    TCGv tmp;

    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Remember the loaded value for the later STREX comparison;
     * store_reg consumes tmp.
     */
    tcg_gen_mov_i32(cpu_exclusive_val, tmp);
    store_reg(s, rt, tmp);
    if (size == 3) {
        /* Doubleword: load the second word from addr + 4 into rt2. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_mov_i32(cpu_exclusive_high, tmp);
        store_reg(s, rt2, tmp);
    }
    /* Mark the exclusive monitor as armed for this address. */
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
}
6539
/* Emit code for CLREX: clear the exclusive monitor.  -1 is the
 * "no outstanding exclusive access" marker checked by STREX.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6544
6545#ifdef CONFIG_USER_ONLY
/* Emit code for STREX in user-mode emulation: record the target address
 * and pack size/rd/rt/rt2 into cpu_exclusive_info, then raise EXCP_STREX
 * so the store-exclusive is performed atomically outside generated code.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    tcg_gen_mov_i32(cpu_exclusive_test, addr);
    /* Encoding: bits [3:0] size, [7:4] rd, [11:8] rt, [15:12] rt2. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_insn(s, 4, EXCP_STREX);
}
6554#else
/* Emit code for STREX{B,H,D}/STREX in system emulation: perform the
 * store only if the exclusive monitor still matches (same address and
 * same memory contents as at LDREX time), and set rd to 0 on success
 * or 1 on failure.  size: 0 = byte, 1 = halfword, 2 = word, 3 = dword.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv addr, int size)
{
    TCGv tmp;
    int done_label;
    int fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* Fail if the monitor is armed for a different address (or cleared). */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
    switch (size) {
    case 0:
        tmp = gen_ld8u(addr, IS_USER(s));
        break;
    case 1:
        tmp = gen_ld16u(addr, IS_USER(s));
        break;
    case 2:
    case 3:
        tmp = gen_ld32(addr, IS_USER(s));
        break;
    default:
        abort();
    }
    /* Fail if memory no longer holds the value seen by LDREX. */
    tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: also recheck the second word at addr + 4. */
        TCGv tmp2 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        tmp = gen_ld32(tmp2, IS_USER(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
        tcg_temp_free_i32(tmp);
    }
    /* Monitor checks passed: perform the store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_st8(tmp, addr, IS_USER(s));
        break;
    case 1:
        gen_st16(tmp, addr, IS_USER(s));
        break;
    case 2:
    case 3:
        gen_st32(tmp, addr, IS_USER(s));
        break;
    default:
        abort();
    }
    if (size == 3) {
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_st32(tmp, addr, IS_USER(s));
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Either way the exclusive monitor is now disarmed. */
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}
6622#endif
6623
0ecb72a5 6624static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6625{
6626 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 6627 TCGv tmp;
3670669c 6628 TCGv tmp2;
6ddbc6e4 6629 TCGv tmp3;
b0109805 6630 TCGv addr;
a7812ae4 6631 TCGv_i64 tmp64;
9ee6e8bb 6632
d31dd73e 6633 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6634 s->pc += 4;
6635
6636 /* M variants do not implement ARM mode. */
6637 if (IS_M(env))
6638 goto illegal_op;
6639 cond = insn >> 28;
6640 if (cond == 0xf){
be5e7a76
DES
6641 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6642 * choose to UNDEF. In ARMv5 and above the space is used
6643 * for miscellaneous unconditional instructions.
6644 */
6645 ARCH(5);
6646
9ee6e8bb
PB
6647 /* Unconditional instructions. */
6648 if (((insn >> 25) & 7) == 1) {
6649 /* NEON Data processing. */
6650 if (!arm_feature(env, ARM_FEATURE_NEON))
6651 goto illegal_op;
6652
6653 if (disas_neon_data_insn(env, s, insn))
6654 goto illegal_op;
6655 return;
6656 }
6657 if ((insn & 0x0f100000) == 0x04000000) {
6658 /* NEON load/store. */
6659 if (!arm_feature(env, ARM_FEATURE_NEON))
6660 goto illegal_op;
6661
6662 if (disas_neon_ls_insn(env, s, insn))
6663 goto illegal_op;
6664 return;
6665 }
3d185e5d
PM
6666 if (((insn & 0x0f30f000) == 0x0510f000) ||
6667 ((insn & 0x0f30f010) == 0x0710f000)) {
6668 if ((insn & (1 << 22)) == 0) {
6669 /* PLDW; v7MP */
6670 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6671 goto illegal_op;
6672 }
6673 }
6674 /* Otherwise PLD; v5TE+ */
be5e7a76 6675 ARCH(5TE);
3d185e5d
PM
6676 return;
6677 }
6678 if (((insn & 0x0f70f000) == 0x0450f000) ||
6679 ((insn & 0x0f70f010) == 0x0650f000)) {
6680 ARCH(7);
6681 return; /* PLI; V7 */
6682 }
6683 if (((insn & 0x0f700000) == 0x04100000) ||
6684 ((insn & 0x0f700010) == 0x06100000)) {
6685 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6686 goto illegal_op;
6687 }
6688 return; /* v7MP: Unallocated memory hint: must NOP */
6689 }
6690
6691 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6692 ARCH(6);
6693 /* setend */
10962fd5
PM
6694 if (((insn >> 9) & 1) != s->bswap_code) {
6695 /* Dynamic endianness switching not implemented. */
9ee6e8bb
PB
6696 goto illegal_op;
6697 }
6698 return;
6699 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6700 switch ((insn >> 4) & 0xf) {
6701 case 1: /* clrex */
6702 ARCH(6K);
426f5abc 6703 gen_clrex(s);
9ee6e8bb
PB
6704 return;
6705 case 4: /* dsb */
6706 case 5: /* dmb */
6707 case 6: /* isb */
6708 ARCH(7);
6709 /* We don't emulate caches so these are a no-op. */
6710 return;
6711 default:
6712 goto illegal_op;
6713 }
6714 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6715 /* srs */
c67b6b71 6716 int32_t offset;
9ee6e8bb
PB
6717 if (IS_USER(s))
6718 goto illegal_op;
6719 ARCH(6);
6720 op1 = (insn & 0x1f);
7d1b0095 6721 addr = tcg_temp_new_i32();
39ea3d4e
PM
6722 tmp = tcg_const_i32(op1);
6723 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6724 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6725 i = (insn >> 23) & 3;
6726 switch (i) {
6727 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6728 case 1: offset = 0; break; /* IA */
6729 case 2: offset = -8; break; /* DB */
9ee6e8bb
PB
6730 case 3: offset = 4; break; /* IB */
6731 default: abort();
6732 }
6733 if (offset)
b0109805
PB
6734 tcg_gen_addi_i32(addr, addr, offset);
6735 tmp = load_reg(s, 14);
6736 gen_st32(tmp, addr, 0);
c67b6b71 6737 tmp = load_cpu_field(spsr);
b0109805
PB
6738 tcg_gen_addi_i32(addr, addr, 4);
6739 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6740 if (insn & (1 << 21)) {
6741 /* Base writeback. */
6742 switch (i) {
6743 case 0: offset = -8; break;
c67b6b71
FN
6744 case 1: offset = 4; break;
6745 case 2: offset = -4; break;
9ee6e8bb
PB
6746 case 3: offset = 0; break;
6747 default: abort();
6748 }
6749 if (offset)
c67b6b71 6750 tcg_gen_addi_i32(addr, addr, offset);
39ea3d4e
PM
6751 tmp = tcg_const_i32(op1);
6752 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6753 tcg_temp_free_i32(tmp);
7d1b0095 6754 tcg_temp_free_i32(addr);
b0109805 6755 } else {
7d1b0095 6756 tcg_temp_free_i32(addr);
9ee6e8bb 6757 }
a990f58f 6758 return;
ea825eee 6759 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6760 /* rfe */
c67b6b71 6761 int32_t offset;
9ee6e8bb
PB
6762 if (IS_USER(s))
6763 goto illegal_op;
6764 ARCH(6);
6765 rn = (insn >> 16) & 0xf;
b0109805 6766 addr = load_reg(s, rn);
9ee6e8bb
PB
6767 i = (insn >> 23) & 3;
6768 switch (i) {
b0109805 6769 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6770 case 1: offset = 0; break; /* IA */
6771 case 2: offset = -8; break; /* DB */
b0109805 6772 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6773 default: abort();
6774 }
6775 if (offset)
b0109805
PB
6776 tcg_gen_addi_i32(addr, addr, offset);
6777 /* Load PC into tmp and CPSR into tmp2. */
6778 tmp = gen_ld32(addr, 0);
6779 tcg_gen_addi_i32(addr, addr, 4);
6780 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6781 if (insn & (1 << 21)) {
6782 /* Base writeback. */
6783 switch (i) {
b0109805 6784 case 0: offset = -8; break;
c67b6b71
FN
6785 case 1: offset = 4; break;
6786 case 2: offset = -4; break;
b0109805 6787 case 3: offset = 0; break;
9ee6e8bb
PB
6788 default: abort();
6789 }
6790 if (offset)
b0109805
PB
6791 tcg_gen_addi_i32(addr, addr, offset);
6792 store_reg(s, rn, addr);
6793 } else {
7d1b0095 6794 tcg_temp_free_i32(addr);
9ee6e8bb 6795 }
b0109805 6796 gen_rfe(s, tmp, tmp2);
c67b6b71 6797 return;
9ee6e8bb
PB
6798 } else if ((insn & 0x0e000000) == 0x0a000000) {
6799 /* branch link and change to thumb (blx <offset>) */
6800 int32_t offset;
6801
6802 val = (uint32_t)s->pc;
7d1b0095 6803 tmp = tcg_temp_new_i32();
d9ba4830
PB
6804 tcg_gen_movi_i32(tmp, val);
6805 store_reg(s, 14, tmp);
9ee6e8bb
PB
6806 /* Sign-extend the 24-bit offset */
6807 offset = (((int32_t)insn) << 8) >> 8;
6808 /* offset * 4 + bit24 * 2 + (thumb bit) */
6809 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6810 /* pipeline offset */
6811 val += 4;
be5e7a76 6812 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 6813 gen_bx_im(s, val);
9ee6e8bb
PB
6814 return;
6815 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6816 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6817 /* iWMMXt register transfer. */
6818 if (env->cp15.c15_cpar & (1 << 1))
6819 if (!disas_iwmmxt_insn(env, s, insn))
6820 return;
6821 }
6822 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6823 /* Coprocessor double register transfer. */
be5e7a76 6824 ARCH(5TE);
9ee6e8bb
PB
6825 } else if ((insn & 0x0f000010) == 0x0e000010) {
6826 /* Additional coprocessor register transfer. */
7997d92f 6827 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6828 uint32_t mask;
6829 uint32_t val;
6830 /* cps (privileged) */
6831 if (IS_USER(s))
6832 return;
6833 mask = val = 0;
6834 if (insn & (1 << 19)) {
6835 if (insn & (1 << 8))
6836 mask |= CPSR_A;
6837 if (insn & (1 << 7))
6838 mask |= CPSR_I;
6839 if (insn & (1 << 6))
6840 mask |= CPSR_F;
6841 if (insn & (1 << 18))
6842 val |= mask;
6843 }
7997d92f 6844 if (insn & (1 << 17)) {
9ee6e8bb
PB
6845 mask |= CPSR_M;
6846 val |= (insn & 0x1f);
6847 }
6848 if (mask) {
2fbac54b 6849 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6850 }
6851 return;
6852 }
6853 goto illegal_op;
6854 }
6855 if (cond != 0xe) {
6856 /* if not always execute, we generate a conditional jump to
6857 next instruction */
6858 s->condlabel = gen_new_label();
d9ba4830 6859 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6860 s->condjmp = 1;
6861 }
6862 if ((insn & 0x0f900000) == 0x03000000) {
6863 if ((insn & (1 << 21)) == 0) {
6864 ARCH(6T2);
6865 rd = (insn >> 12) & 0xf;
6866 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6867 if ((insn & (1 << 22)) == 0) {
6868 /* MOVW */
7d1b0095 6869 tmp = tcg_temp_new_i32();
5e3f878a 6870 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6871 } else {
6872 /* MOVT */
5e3f878a 6873 tmp = load_reg(s, rd);
86831435 6874 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6875 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6876 }
5e3f878a 6877 store_reg(s, rd, tmp);
9ee6e8bb
PB
6878 } else {
6879 if (((insn >> 12) & 0xf) != 0xf)
6880 goto illegal_op;
6881 if (((insn >> 16) & 0xf) == 0) {
6882 gen_nop_hint(s, insn & 0xff);
6883 } else {
6884 /* CPSR = immediate */
6885 val = insn & 0xff;
6886 shift = ((insn >> 8) & 0xf) * 2;
6887 if (shift)
6888 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6889 i = ((insn & (1 << 22)) != 0);
2fbac54b 6890 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6891 goto illegal_op;
6892 }
6893 }
6894 } else if ((insn & 0x0f900000) == 0x01000000
6895 && (insn & 0x00000090) != 0x00000090) {
6896 /* miscellaneous instructions */
6897 op1 = (insn >> 21) & 3;
6898 sh = (insn >> 4) & 0xf;
6899 rm = insn & 0xf;
6900 switch (sh) {
6901 case 0x0: /* move program status register */
6902 if (op1 & 1) {
6903 /* PSR = reg */
2fbac54b 6904 tmp = load_reg(s, rm);
9ee6e8bb 6905 i = ((op1 & 2) != 0);
2fbac54b 6906 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6907 goto illegal_op;
6908 } else {
6909 /* reg = PSR */
6910 rd = (insn >> 12) & 0xf;
6911 if (op1 & 2) {
6912 if (IS_USER(s))
6913 goto illegal_op;
d9ba4830 6914 tmp = load_cpu_field(spsr);
9ee6e8bb 6915 } else {
7d1b0095 6916 tmp = tcg_temp_new_i32();
9ef39277 6917 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6918 }
d9ba4830 6919 store_reg(s, rd, tmp);
9ee6e8bb
PB
6920 }
6921 break;
6922 case 0x1:
6923 if (op1 == 1) {
6924 /* branch/exchange thumb (bx). */
be5e7a76 6925 ARCH(4T);
d9ba4830
PB
6926 tmp = load_reg(s, rm);
6927 gen_bx(s, tmp);
9ee6e8bb
PB
6928 } else if (op1 == 3) {
6929 /* clz */
be5e7a76 6930 ARCH(5);
9ee6e8bb 6931 rd = (insn >> 12) & 0xf;
1497c961
PB
6932 tmp = load_reg(s, rm);
6933 gen_helper_clz(tmp, tmp);
6934 store_reg(s, rd, tmp);
9ee6e8bb
PB
6935 } else {
6936 goto illegal_op;
6937 }
6938 break;
6939 case 0x2:
6940 if (op1 == 1) {
6941 ARCH(5J); /* bxj */
6942 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6943 tmp = load_reg(s, rm);
6944 gen_bx(s, tmp);
9ee6e8bb
PB
6945 } else {
6946 goto illegal_op;
6947 }
6948 break;
6949 case 0x3:
6950 if (op1 != 1)
6951 goto illegal_op;
6952
be5e7a76 6953 ARCH(5);
9ee6e8bb 6954 /* branch link/exchange thumb (blx) */
d9ba4830 6955 tmp = load_reg(s, rm);
7d1b0095 6956 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
6957 tcg_gen_movi_i32(tmp2, s->pc);
6958 store_reg(s, 14, tmp2);
6959 gen_bx(s, tmp);
9ee6e8bb
PB
6960 break;
6961 case 0x5: /* saturating add/subtract */
be5e7a76 6962 ARCH(5TE);
9ee6e8bb
PB
6963 rd = (insn >> 12) & 0xf;
6964 rn = (insn >> 16) & 0xf;
b40d0353 6965 tmp = load_reg(s, rm);
5e3f878a 6966 tmp2 = load_reg(s, rn);
9ee6e8bb 6967 if (op1 & 2)
9ef39277 6968 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 6969 if (op1 & 1)
9ef39277 6970 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6971 else
9ef39277 6972 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 6973 tcg_temp_free_i32(tmp2);
5e3f878a 6974 store_reg(s, rd, tmp);
9ee6e8bb 6975 break;
49e14940
AL
6976 case 7:
6977 /* SMC instruction (op1 == 3)
6978 and undefined instructions (op1 == 0 || op1 == 2)
6979 will trap */
6980 if (op1 != 1) {
6981 goto illegal_op;
6982 }
6983 /* bkpt */
be5e7a76 6984 ARCH(5);
bc4a0de0 6985 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
6986 break;
6987 case 0x8: /* signed multiply */
6988 case 0xa:
6989 case 0xc:
6990 case 0xe:
be5e7a76 6991 ARCH(5TE);
9ee6e8bb
PB
6992 rs = (insn >> 8) & 0xf;
6993 rn = (insn >> 12) & 0xf;
6994 rd = (insn >> 16) & 0xf;
6995 if (op1 == 1) {
6996 /* (32 * 16) >> 16 */
5e3f878a
PB
6997 tmp = load_reg(s, rm);
6998 tmp2 = load_reg(s, rs);
9ee6e8bb 6999 if (sh & 4)
5e3f878a 7000 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7001 else
5e3f878a 7002 gen_sxth(tmp2);
a7812ae4
PB
7003 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7004 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7005 tmp = tcg_temp_new_i32();
a7812ae4 7006 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7007 tcg_temp_free_i64(tmp64);
9ee6e8bb 7008 if ((sh & 2) == 0) {
5e3f878a 7009 tmp2 = load_reg(s, rn);
9ef39277 7010 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7011 tcg_temp_free_i32(tmp2);
9ee6e8bb 7012 }
5e3f878a 7013 store_reg(s, rd, tmp);
9ee6e8bb
PB
7014 } else {
7015 /* 16 * 16 */
5e3f878a
PB
7016 tmp = load_reg(s, rm);
7017 tmp2 = load_reg(s, rs);
7018 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7019 tcg_temp_free_i32(tmp2);
9ee6e8bb 7020 if (op1 == 2) {
a7812ae4
PB
7021 tmp64 = tcg_temp_new_i64();
7022 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7023 tcg_temp_free_i32(tmp);
a7812ae4
PB
7024 gen_addq(s, tmp64, rn, rd);
7025 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7026 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7027 } else {
7028 if (op1 == 0) {
5e3f878a 7029 tmp2 = load_reg(s, rn);
9ef39277 7030 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7031 tcg_temp_free_i32(tmp2);
9ee6e8bb 7032 }
5e3f878a 7033 store_reg(s, rd, tmp);
9ee6e8bb
PB
7034 }
7035 }
7036 break;
7037 default:
7038 goto illegal_op;
7039 }
7040 } else if (((insn & 0x0e000000) == 0 &&
7041 (insn & 0x00000090) != 0x90) ||
7042 ((insn & 0x0e000000) == (1 << 25))) {
7043 int set_cc, logic_cc, shiftop;
7044
7045 op1 = (insn >> 21) & 0xf;
7046 set_cc = (insn >> 20) & 1;
7047 logic_cc = table_logic_cc[op1] & set_cc;
7048
7049 /* data processing instruction */
7050 if (insn & (1 << 25)) {
7051 /* immediate operand */
7052 val = insn & 0xff;
7053 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7054 if (shift) {
9ee6e8bb 7055 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7056 }
7d1b0095 7057 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7058 tcg_gen_movi_i32(tmp2, val);
7059 if (logic_cc && shift) {
7060 gen_set_CF_bit31(tmp2);
7061 }
9ee6e8bb
PB
7062 } else {
7063 /* register */
7064 rm = (insn) & 0xf;
e9bb4aa9 7065 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7066 shiftop = (insn >> 5) & 3;
7067 if (!(insn & (1 << 4))) {
7068 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7069 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7070 } else {
7071 rs = (insn >> 8) & 0xf;
8984bd2e 7072 tmp = load_reg(s, rs);
e9bb4aa9 7073 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7074 }
7075 }
7076 if (op1 != 0x0f && op1 != 0x0d) {
7077 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7078 tmp = load_reg(s, rn);
7079 } else {
7080 TCGV_UNUSED(tmp);
9ee6e8bb
PB
7081 }
7082 rd = (insn >> 12) & 0xf;
7083 switch(op1) {
7084 case 0x00:
e9bb4aa9
JR
7085 tcg_gen_and_i32(tmp, tmp, tmp2);
7086 if (logic_cc) {
7087 gen_logic_CC(tmp);
7088 }
21aeb343 7089 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7090 break;
7091 case 0x01:
e9bb4aa9
JR
7092 tcg_gen_xor_i32(tmp, tmp, tmp2);
7093 if (logic_cc) {
7094 gen_logic_CC(tmp);
7095 }
21aeb343 7096 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7097 break;
7098 case 0x02:
7099 if (set_cc && rd == 15) {
7100 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 7101 if (IS_USER(s)) {
9ee6e8bb 7102 goto illegal_op;
e9bb4aa9 7103 }
72485ec4 7104 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7105 gen_exception_return(s, tmp);
9ee6e8bb 7106 } else {
e9bb4aa9 7107 if (set_cc) {
72485ec4 7108 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7109 } else {
7110 tcg_gen_sub_i32(tmp, tmp, tmp2);
7111 }
21aeb343 7112 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7113 }
7114 break;
7115 case 0x03:
e9bb4aa9 7116 if (set_cc) {
72485ec4 7117 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7118 } else {
7119 tcg_gen_sub_i32(tmp, tmp2, tmp);
7120 }
21aeb343 7121 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7122 break;
7123 case 0x04:
e9bb4aa9 7124 if (set_cc) {
72485ec4 7125 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7126 } else {
7127 tcg_gen_add_i32(tmp, tmp, tmp2);
7128 }
21aeb343 7129 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7130 break;
7131 case 0x05:
e9bb4aa9 7132 if (set_cc) {
49b4c31e 7133 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7134 } else {
7135 gen_add_carry(tmp, tmp, tmp2);
7136 }
21aeb343 7137 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7138 break;
7139 case 0x06:
e9bb4aa9 7140 if (set_cc) {
2de68a49 7141 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7142 } else {
7143 gen_sub_carry(tmp, tmp, tmp2);
7144 }
21aeb343 7145 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7146 break;
7147 case 0x07:
e9bb4aa9 7148 if (set_cc) {
2de68a49 7149 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7150 } else {
7151 gen_sub_carry(tmp, tmp2, tmp);
7152 }
21aeb343 7153 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7154 break;
7155 case 0x08:
7156 if (set_cc) {
e9bb4aa9
JR
7157 tcg_gen_and_i32(tmp, tmp, tmp2);
7158 gen_logic_CC(tmp);
9ee6e8bb 7159 }
7d1b0095 7160 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7161 break;
7162 case 0x09:
7163 if (set_cc) {
e9bb4aa9
JR
7164 tcg_gen_xor_i32(tmp, tmp, tmp2);
7165 gen_logic_CC(tmp);
9ee6e8bb 7166 }
7d1b0095 7167 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7168 break;
7169 case 0x0a:
7170 if (set_cc) {
72485ec4 7171 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7172 }
7d1b0095 7173 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7174 break;
7175 case 0x0b:
7176 if (set_cc) {
72485ec4 7177 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7178 }
7d1b0095 7179 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7180 break;
7181 case 0x0c:
e9bb4aa9
JR
7182 tcg_gen_or_i32(tmp, tmp, tmp2);
7183 if (logic_cc) {
7184 gen_logic_CC(tmp);
7185 }
21aeb343 7186 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7187 break;
7188 case 0x0d:
7189 if (logic_cc && rd == 15) {
7190 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7191 if (IS_USER(s)) {
9ee6e8bb 7192 goto illegal_op;
e9bb4aa9
JR
7193 }
7194 gen_exception_return(s, tmp2);
9ee6e8bb 7195 } else {
e9bb4aa9
JR
7196 if (logic_cc) {
7197 gen_logic_CC(tmp2);
7198 }
21aeb343 7199 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7200 }
7201 break;
7202 case 0x0e:
f669df27 7203 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7204 if (logic_cc) {
7205 gen_logic_CC(tmp);
7206 }
21aeb343 7207 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7208 break;
7209 default:
7210 case 0x0f:
e9bb4aa9
JR
7211 tcg_gen_not_i32(tmp2, tmp2);
7212 if (logic_cc) {
7213 gen_logic_CC(tmp2);
7214 }
21aeb343 7215 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7216 break;
7217 }
e9bb4aa9 7218 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7219 tcg_temp_free_i32(tmp2);
e9bb4aa9 7220 }
9ee6e8bb
PB
7221 } else {
7222 /* other instructions */
7223 op1 = (insn >> 24) & 0xf;
7224 switch(op1) {
7225 case 0x0:
7226 case 0x1:
7227 /* multiplies, extra load/stores */
7228 sh = (insn >> 5) & 3;
7229 if (sh == 0) {
7230 if (op1 == 0x0) {
7231 rd = (insn >> 16) & 0xf;
7232 rn = (insn >> 12) & 0xf;
7233 rs = (insn >> 8) & 0xf;
7234 rm = (insn) & 0xf;
7235 op1 = (insn >> 20) & 0xf;
7236 switch (op1) {
7237 case 0: case 1: case 2: case 3: case 6:
7238 /* 32 bit mul */
5e3f878a
PB
7239 tmp = load_reg(s, rs);
7240 tmp2 = load_reg(s, rm);
7241 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7242 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7243 if (insn & (1 << 22)) {
7244 /* Subtract (mls) */
7245 ARCH(6T2);
5e3f878a
PB
7246 tmp2 = load_reg(s, rn);
7247 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7248 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7249 } else if (insn & (1 << 21)) {
7250 /* Add */
5e3f878a
PB
7251 tmp2 = load_reg(s, rn);
7252 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7253 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7254 }
7255 if (insn & (1 << 20))
5e3f878a
PB
7256 gen_logic_CC(tmp);
7257 store_reg(s, rd, tmp);
9ee6e8bb 7258 break;
8aac08b1
AJ
7259 case 4:
7260 /* 64 bit mul double accumulate (UMAAL) */
7261 ARCH(6);
7262 tmp = load_reg(s, rs);
7263 tmp2 = load_reg(s, rm);
7264 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7265 gen_addq_lo(s, tmp64, rn);
7266 gen_addq_lo(s, tmp64, rd);
7267 gen_storeq_reg(s, rn, rd, tmp64);
7268 tcg_temp_free_i64(tmp64);
7269 break;
7270 case 8: case 9: case 10: case 11:
7271 case 12: case 13: case 14: case 15:
7272 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
7273 tmp = load_reg(s, rs);
7274 tmp2 = load_reg(s, rm);
8aac08b1 7275 if (insn & (1 << 22)) {
c9f10124 7276 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7277 } else {
c9f10124 7278 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7279 }
7280 if (insn & (1 << 21)) { /* mult accumulate */
c9f10124
RH
7281 TCGv al = load_reg(s, rn);
7282 TCGv ah = load_reg(s, rd);
7283 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7284 tcg_temp_free(al);
7285 tcg_temp_free(ah);
9ee6e8bb 7286 }
8aac08b1 7287 if (insn & (1 << 20)) {
c9f10124 7288 gen_logicq_cc(tmp, tmp2);
8aac08b1 7289 }
c9f10124
RH
7290 store_reg(s, rn, tmp);
7291 store_reg(s, rd, tmp2);
9ee6e8bb 7292 break;
8aac08b1
AJ
7293 default:
7294 goto illegal_op;
9ee6e8bb
PB
7295 }
7296 } else {
7297 rn = (insn >> 16) & 0xf;
7298 rd = (insn >> 12) & 0xf;
7299 if (insn & (1 << 23)) {
7300 /* load/store exclusive */
86753403
PB
7301 op1 = (insn >> 21) & 0x3;
7302 if (op1)
a47f43d2 7303 ARCH(6K);
86753403
PB
7304 else
7305 ARCH(6);
3174f8e9 7306 addr = tcg_temp_local_new_i32();
98a46317 7307 load_reg_var(s, addr, rn);
9ee6e8bb 7308 if (insn & (1 << 20)) {
86753403
PB
7309 switch (op1) {
7310 case 0: /* ldrex */
426f5abc 7311 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7312 break;
7313 case 1: /* ldrexd */
426f5abc 7314 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7315 break;
7316 case 2: /* ldrexb */
426f5abc 7317 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7318 break;
7319 case 3: /* ldrexh */
426f5abc 7320 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7321 break;
7322 default:
7323 abort();
7324 }
9ee6e8bb
PB
7325 } else {
7326 rm = insn & 0xf;
86753403
PB
7327 switch (op1) {
7328 case 0: /* strex */
426f5abc 7329 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7330 break;
7331 case 1: /* strexd */
502e64fe 7332 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7333 break;
7334 case 2: /* strexb */
426f5abc 7335 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7336 break;
7337 case 3: /* strexh */
426f5abc 7338 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7339 break;
7340 default:
7341 abort();
7342 }
9ee6e8bb 7343 }
3174f8e9 7344 tcg_temp_free(addr);
9ee6e8bb
PB
7345 } else {
7346 /* SWP instruction */
7347 rm = (insn) & 0xf;
7348
8984bd2e
PB
7349 /* ??? This is not really atomic. However we know
7350 we never have multiple CPUs running in parallel,
7351 so it is good enough. */
7352 addr = load_reg(s, rn);
7353 tmp = load_reg(s, rm);
9ee6e8bb 7354 if (insn & (1 << 22)) {
8984bd2e
PB
7355 tmp2 = gen_ld8u(addr, IS_USER(s));
7356 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7357 } else {
8984bd2e
PB
7358 tmp2 = gen_ld32(addr, IS_USER(s));
7359 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7360 }
7d1b0095 7361 tcg_temp_free_i32(addr);
8984bd2e 7362 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7363 }
7364 }
7365 } else {
7366 int address_offset;
7367 int load;
7368 /* Misc load/store */
7369 rn = (insn >> 16) & 0xf;
7370 rd = (insn >> 12) & 0xf;
b0109805 7371 addr = load_reg(s, rn);
9ee6e8bb 7372 if (insn & (1 << 24))
b0109805 7373 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7374 address_offset = 0;
7375 if (insn & (1 << 20)) {
7376 /* load */
7377 switch(sh) {
7378 case 1:
b0109805 7379 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
7380 break;
7381 case 2:
b0109805 7382 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
7383 break;
7384 default:
7385 case 3:
b0109805 7386 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
7387 break;
7388 }
7389 load = 1;
7390 } else if (sh & 2) {
be5e7a76 7391 ARCH(5TE);
9ee6e8bb
PB
7392 /* doubleword */
7393 if (sh & 1) {
7394 /* store */
b0109805
PB
7395 tmp = load_reg(s, rd);
7396 gen_st32(tmp, addr, IS_USER(s));
7397 tcg_gen_addi_i32(addr, addr, 4);
7398 tmp = load_reg(s, rd + 1);
7399 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7400 load = 0;
7401 } else {
7402 /* load */
b0109805
PB
7403 tmp = gen_ld32(addr, IS_USER(s));
7404 store_reg(s, rd, tmp);
7405 tcg_gen_addi_i32(addr, addr, 4);
7406 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
7407 rd++;
7408 load = 1;
7409 }
7410 address_offset = -4;
7411 } else {
7412 /* store */
b0109805
PB
7413 tmp = load_reg(s, rd);
7414 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7415 load = 0;
7416 }
7417 /* Perform base writeback before the loaded value to
7418 ensure correct behavior with overlapping index registers.
7419 ldrd with base writeback is is undefined if the
7420 destination and index registers overlap. */
7421 if (!(insn & (1 << 24))) {
b0109805
PB
7422 gen_add_datah_offset(s, insn, address_offset, addr);
7423 store_reg(s, rn, addr);
9ee6e8bb
PB
7424 } else if (insn & (1 << 21)) {
7425 if (address_offset)
b0109805
PB
7426 tcg_gen_addi_i32(addr, addr, address_offset);
7427 store_reg(s, rn, addr);
7428 } else {
7d1b0095 7429 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7430 }
7431 if (load) {
7432 /* Complete the load. */
b0109805 7433 store_reg(s, rd, tmp);
9ee6e8bb
PB
7434 }
7435 }
7436 break;
7437 case 0x4:
7438 case 0x5:
7439 goto do_ldst;
7440 case 0x6:
7441 case 0x7:
7442 if (insn & (1 << 4)) {
7443 ARCH(6);
7444 /* Armv6 Media instructions. */
7445 rm = insn & 0xf;
7446 rn = (insn >> 16) & 0xf;
2c0262af 7447 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7448 rs = (insn >> 8) & 0xf;
7449 switch ((insn >> 23) & 3) {
7450 case 0: /* Parallel add/subtract. */
7451 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7452 tmp = load_reg(s, rn);
7453 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7454 sh = (insn >> 5) & 7;
7455 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7456 goto illegal_op;
6ddbc6e4 7457 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7458 tcg_temp_free_i32(tmp2);
6ddbc6e4 7459 store_reg(s, rd, tmp);
9ee6e8bb
PB
7460 break;
7461 case 1:
7462 if ((insn & 0x00700020) == 0) {
6c95676b 7463 /* Halfword pack. */
3670669c
PB
7464 tmp = load_reg(s, rn);
7465 tmp2 = load_reg(s, rm);
9ee6e8bb 7466 shift = (insn >> 7) & 0x1f;
3670669c
PB
7467 if (insn & (1 << 6)) {
7468 /* pkhtb */
22478e79
AZ
7469 if (shift == 0)
7470 shift = 31;
7471 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7472 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7473 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7474 } else {
7475 /* pkhbt */
22478e79
AZ
7476 if (shift)
7477 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7478 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7479 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7480 }
7481 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7482 tcg_temp_free_i32(tmp2);
3670669c 7483 store_reg(s, rd, tmp);
9ee6e8bb
PB
7484 } else if ((insn & 0x00200020) == 0x00200000) {
7485 /* [us]sat */
6ddbc6e4 7486 tmp = load_reg(s, rm);
9ee6e8bb
PB
7487 shift = (insn >> 7) & 0x1f;
7488 if (insn & (1 << 6)) {
7489 if (shift == 0)
7490 shift = 31;
6ddbc6e4 7491 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7492 } else {
6ddbc6e4 7493 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7494 }
7495 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7496 tmp2 = tcg_const_i32(sh);
7497 if (insn & (1 << 22))
9ef39277 7498 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7499 else
9ef39277 7500 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7501 tcg_temp_free_i32(tmp2);
6ddbc6e4 7502 store_reg(s, rd, tmp);
9ee6e8bb
PB
7503 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7504 /* [us]sat16 */
6ddbc6e4 7505 tmp = load_reg(s, rm);
9ee6e8bb 7506 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7507 tmp2 = tcg_const_i32(sh);
7508 if (insn & (1 << 22))
9ef39277 7509 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7510 else
9ef39277 7511 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7512 tcg_temp_free_i32(tmp2);
6ddbc6e4 7513 store_reg(s, rd, tmp);
9ee6e8bb
PB
7514 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7515 /* Select bytes. */
6ddbc6e4
PB
7516 tmp = load_reg(s, rn);
7517 tmp2 = load_reg(s, rm);
7d1b0095 7518 tmp3 = tcg_temp_new_i32();
0ecb72a5 7519 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7520 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7521 tcg_temp_free_i32(tmp3);
7522 tcg_temp_free_i32(tmp2);
6ddbc6e4 7523 store_reg(s, rd, tmp);
9ee6e8bb 7524 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7525 tmp = load_reg(s, rm);
9ee6e8bb 7526 shift = (insn >> 10) & 3;
1301f322 7527 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7528 rotate, a shift is sufficient. */
7529 if (shift != 0)
f669df27 7530 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7531 op1 = (insn >> 20) & 7;
7532 switch (op1) {
5e3f878a
PB
7533 case 0: gen_sxtb16(tmp); break;
7534 case 2: gen_sxtb(tmp); break;
7535 case 3: gen_sxth(tmp); break;
7536 case 4: gen_uxtb16(tmp); break;
7537 case 6: gen_uxtb(tmp); break;
7538 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7539 default: goto illegal_op;
7540 }
7541 if (rn != 15) {
5e3f878a 7542 tmp2 = load_reg(s, rn);
9ee6e8bb 7543 if ((op1 & 3) == 0) {
5e3f878a 7544 gen_add16(tmp, tmp2);
9ee6e8bb 7545 } else {
5e3f878a 7546 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7547 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7548 }
7549 }
6c95676b 7550 store_reg(s, rd, tmp);
9ee6e8bb
PB
7551 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7552 /* rev */
b0109805 7553 tmp = load_reg(s, rm);
9ee6e8bb
PB
7554 if (insn & (1 << 22)) {
7555 if (insn & (1 << 7)) {
b0109805 7556 gen_revsh(tmp);
9ee6e8bb
PB
7557 } else {
7558 ARCH(6T2);
b0109805 7559 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7560 }
7561 } else {
7562 if (insn & (1 << 7))
b0109805 7563 gen_rev16(tmp);
9ee6e8bb 7564 else
66896cb8 7565 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7566 }
b0109805 7567 store_reg(s, rd, tmp);
9ee6e8bb
PB
7568 } else {
7569 goto illegal_op;
7570 }
7571 break;
7572 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7573 switch ((insn >> 20) & 0x7) {
7574 case 5:
7575 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7576 /* op2 not 00x or 11x : UNDEF */
7577 goto illegal_op;
7578 }
838fa72d
AJ
7579 /* Signed multiply most significant [accumulate].
7580 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
7581 tmp = load_reg(s, rm);
7582 tmp2 = load_reg(s, rs);
a7812ae4 7583 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7584
955a7dd5 7585 if (rd != 15) {
838fa72d 7586 tmp = load_reg(s, rd);
9ee6e8bb 7587 if (insn & (1 << 6)) {
838fa72d 7588 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7589 } else {
838fa72d 7590 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7591 }
7592 }
838fa72d
AJ
7593 if (insn & (1 << 5)) {
7594 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7595 }
7596 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7597 tmp = tcg_temp_new_i32();
838fa72d
AJ
7598 tcg_gen_trunc_i64_i32(tmp, tmp64);
7599 tcg_temp_free_i64(tmp64);
955a7dd5 7600 store_reg(s, rn, tmp);
41e9564d
PM
7601 break;
7602 case 0:
7603 case 4:
7604 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7605 if (insn & (1 << 7)) {
7606 goto illegal_op;
7607 }
7608 tmp = load_reg(s, rm);
7609 tmp2 = load_reg(s, rs);
9ee6e8bb 7610 if (insn & (1 << 5))
5e3f878a
PB
7611 gen_swap_half(tmp2);
7612 gen_smul_dual(tmp, tmp2);
5e3f878a 7613 if (insn & (1 << 6)) {
e1d177b9 7614 /* This subtraction cannot overflow. */
5e3f878a
PB
7615 tcg_gen_sub_i32(tmp, tmp, tmp2);
7616 } else {
e1d177b9
PM
7617 /* This addition cannot overflow 32 bits;
7618 * however it may overflow considered as a signed
7619 * operation, in which case we must set the Q flag.
7620 */
9ef39277 7621 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7622 }
7d1b0095 7623 tcg_temp_free_i32(tmp2);
9ee6e8bb 7624 if (insn & (1 << 22)) {
5e3f878a 7625 /* smlald, smlsld */
a7812ae4
PB
7626 tmp64 = tcg_temp_new_i64();
7627 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7628 tcg_temp_free_i32(tmp);
a7812ae4
PB
7629 gen_addq(s, tmp64, rd, rn);
7630 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7631 tcg_temp_free_i64(tmp64);
9ee6e8bb 7632 } else {
5e3f878a 7633 /* smuad, smusd, smlad, smlsd */
22478e79 7634 if (rd != 15)
9ee6e8bb 7635 {
22478e79 7636 tmp2 = load_reg(s, rd);
9ef39277 7637 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7638 tcg_temp_free_i32(tmp2);
9ee6e8bb 7639 }
22478e79 7640 store_reg(s, rn, tmp);
9ee6e8bb 7641 }
41e9564d 7642 break;
b8b8ea05
PM
7643 case 1:
7644 case 3:
7645 /* SDIV, UDIV */
7646 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7647 goto illegal_op;
7648 }
7649 if (((insn >> 5) & 7) || (rd != 15)) {
7650 goto illegal_op;
7651 }
7652 tmp = load_reg(s, rm);
7653 tmp2 = load_reg(s, rs);
7654 if (insn & (1 << 21)) {
7655 gen_helper_udiv(tmp, tmp, tmp2);
7656 } else {
7657 gen_helper_sdiv(tmp, tmp, tmp2);
7658 }
7659 tcg_temp_free_i32(tmp2);
7660 store_reg(s, rn, tmp);
7661 break;
41e9564d
PM
7662 default:
7663 goto illegal_op;
9ee6e8bb
PB
7664 }
7665 break;
7666 case 3:
7667 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7668 switch (op1) {
7669 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7670 ARCH(6);
7671 tmp = load_reg(s, rm);
7672 tmp2 = load_reg(s, rs);
7673 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7674 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7675 if (rd != 15) {
7676 tmp2 = load_reg(s, rd);
6ddbc6e4 7677 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7678 tcg_temp_free_i32(tmp2);
9ee6e8bb 7679 }
ded9d295 7680 store_reg(s, rn, tmp);
9ee6e8bb
PB
7681 break;
7682 case 0x20: case 0x24: case 0x28: case 0x2c:
7683 /* Bitfield insert/clear. */
7684 ARCH(6T2);
7685 shift = (insn >> 7) & 0x1f;
7686 i = (insn >> 16) & 0x1f;
7687 i = i + 1 - shift;
7688 if (rm == 15) {
7d1b0095 7689 tmp = tcg_temp_new_i32();
5e3f878a 7690 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7691 } else {
5e3f878a 7692 tmp = load_reg(s, rm);
9ee6e8bb
PB
7693 }
7694 if (i != 32) {
5e3f878a 7695 tmp2 = load_reg(s, rd);
d593c48e 7696 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7697 tcg_temp_free_i32(tmp2);
9ee6e8bb 7698 }
5e3f878a 7699 store_reg(s, rd, tmp);
9ee6e8bb
PB
7700 break;
7701 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7702 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7703 ARCH(6T2);
5e3f878a 7704 tmp = load_reg(s, rm);
9ee6e8bb
PB
7705 shift = (insn >> 7) & 0x1f;
7706 i = ((insn >> 16) & 0x1f) + 1;
7707 if (shift + i > 32)
7708 goto illegal_op;
7709 if (i < 32) {
7710 if (op1 & 0x20) {
5e3f878a 7711 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7712 } else {
5e3f878a 7713 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7714 }
7715 }
5e3f878a 7716 store_reg(s, rd, tmp);
9ee6e8bb
PB
7717 break;
7718 default:
7719 goto illegal_op;
7720 }
7721 break;
7722 }
7723 break;
7724 }
7725 do_ldst:
7726 /* Check for undefined extension instructions
7727 * per the ARM Bible IE:
7728 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7729 */
7730 sh = (0xf << 20) | (0xf << 4);
7731 if (op1 == 0x7 && ((insn & sh) == sh))
7732 {
7733 goto illegal_op;
7734 }
7735 /* load/store byte/word */
7736 rn = (insn >> 16) & 0xf;
7737 rd = (insn >> 12) & 0xf;
b0109805 7738 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7739 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7740 if (insn & (1 << 24))
b0109805 7741 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7742 if (insn & (1 << 20)) {
7743 /* load */
9ee6e8bb 7744 if (insn & (1 << 22)) {
b0109805 7745 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 7746 } else {
b0109805 7747 tmp = gen_ld32(tmp2, i);
9ee6e8bb 7748 }
9ee6e8bb
PB
7749 } else {
7750 /* store */
b0109805 7751 tmp = load_reg(s, rd);
9ee6e8bb 7752 if (insn & (1 << 22))
b0109805 7753 gen_st8(tmp, tmp2, i);
9ee6e8bb 7754 else
b0109805 7755 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
7756 }
7757 if (!(insn & (1 << 24))) {
b0109805
PB
7758 gen_add_data_offset(s, insn, tmp2);
7759 store_reg(s, rn, tmp2);
7760 } else if (insn & (1 << 21)) {
7761 store_reg(s, rn, tmp2);
7762 } else {
7d1b0095 7763 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7764 }
7765 if (insn & (1 << 20)) {
7766 /* Complete the load. */
be5e7a76 7767 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7768 }
7769 break;
7770 case 0x08:
7771 case 0x09:
7772 {
7773 int j, n, user, loaded_base;
b0109805 7774 TCGv loaded_var;
9ee6e8bb
PB
7775 /* load/store multiple words */
7776 /* XXX: store correct base if write back */
7777 user = 0;
7778 if (insn & (1 << 22)) {
7779 if (IS_USER(s))
7780 goto illegal_op; /* only usable in supervisor mode */
7781
7782 if ((insn & (1 << 15)) == 0)
7783 user = 1;
7784 }
7785 rn = (insn >> 16) & 0xf;
b0109805 7786 addr = load_reg(s, rn);
9ee6e8bb
PB
7787
7788 /* compute total size */
7789 loaded_base = 0;
a50f5b91 7790 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
7791 n = 0;
7792 for(i=0;i<16;i++) {
7793 if (insn & (1 << i))
7794 n++;
7795 }
7796 /* XXX: test invalid n == 0 case ? */
7797 if (insn & (1 << 23)) {
7798 if (insn & (1 << 24)) {
7799 /* pre increment */
b0109805 7800 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7801 } else {
7802 /* post increment */
7803 }
7804 } else {
7805 if (insn & (1 << 24)) {
7806 /* pre decrement */
b0109805 7807 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7808 } else {
7809 /* post decrement */
7810 if (n != 1)
b0109805 7811 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7812 }
7813 }
7814 j = 0;
7815 for(i=0;i<16;i++) {
7816 if (insn & (1 << i)) {
7817 if (insn & (1 << 20)) {
7818 /* load */
b0109805 7819 tmp = gen_ld32(addr, IS_USER(s));
be5e7a76 7820 if (user) {
b75263d6 7821 tmp2 = tcg_const_i32(i);
1ce94f81 7822 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7823 tcg_temp_free_i32(tmp2);
7d1b0095 7824 tcg_temp_free_i32(tmp);
9ee6e8bb 7825 } else if (i == rn) {
b0109805 7826 loaded_var = tmp;
9ee6e8bb
PB
7827 loaded_base = 1;
7828 } else {
be5e7a76 7829 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7830 }
7831 } else {
7832 /* store */
7833 if (i == 15) {
7834 /* special case: r15 = PC + 8 */
7835 val = (long)s->pc + 4;
7d1b0095 7836 tmp = tcg_temp_new_i32();
b0109805 7837 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7838 } else if (user) {
7d1b0095 7839 tmp = tcg_temp_new_i32();
b75263d6 7840 tmp2 = tcg_const_i32(i);
9ef39277 7841 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7842 tcg_temp_free_i32(tmp2);
9ee6e8bb 7843 } else {
b0109805 7844 tmp = load_reg(s, i);
9ee6e8bb 7845 }
b0109805 7846 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7847 }
7848 j++;
7849 /* no need to add after the last transfer */
7850 if (j != n)
b0109805 7851 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7852 }
7853 }
7854 if (insn & (1 << 21)) {
7855 /* write back */
7856 if (insn & (1 << 23)) {
7857 if (insn & (1 << 24)) {
7858 /* pre increment */
7859 } else {
7860 /* post increment */
b0109805 7861 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7862 }
7863 } else {
7864 if (insn & (1 << 24)) {
7865 /* pre decrement */
7866 if (n != 1)
b0109805 7867 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7868 } else {
7869 /* post decrement */
b0109805 7870 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7871 }
7872 }
b0109805
PB
7873 store_reg(s, rn, addr);
7874 } else {
7d1b0095 7875 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7876 }
7877 if (loaded_base) {
b0109805 7878 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
7879 }
7880 if ((insn & (1 << 22)) && !user) {
7881 /* Restore CPSR from SPSR. */
d9ba4830
PB
7882 tmp = load_cpu_field(spsr);
7883 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 7884 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7885 s->is_jmp = DISAS_UPDATE;
7886 }
7887 }
7888 break;
7889 case 0xa:
7890 case 0xb:
7891 {
7892 int32_t offset;
7893
7894 /* branch (and link) */
7895 val = (int32_t)s->pc;
7896 if (insn & (1 << 24)) {
7d1b0095 7897 tmp = tcg_temp_new_i32();
5e3f878a
PB
7898 tcg_gen_movi_i32(tmp, val);
7899 store_reg(s, 14, tmp);
9ee6e8bb
PB
7900 }
7901 offset = (((int32_t)insn << 8) >> 8);
7902 val += (offset << 2) + 4;
7903 gen_jmp(s, val);
7904 }
7905 break;
7906 case 0xc:
7907 case 0xd:
7908 case 0xe:
7909 /* Coprocessor. */
7910 if (disas_coproc_insn(env, s, insn))
7911 goto illegal_op;
7912 break;
7913 case 0xf:
7914 /* swi */
5e3f878a 7915 gen_set_pc_im(s->pc);
9ee6e8bb
PB
7916 s->is_jmp = DISAS_SWI;
7917 break;
7918 default:
7919 illegal_op:
bc4a0de0 7920 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
7921 break;
7922 }
7923 }
7924}
7925
/* Return nonzero if OP is a Thumb-2 logical opcode (encodings 0-7),
   i.e. one whose flag-setting form updates N/Z from the result and
   carry from the shifter output rather than from an ALU carry-out.  */
static int
thumb2_logic_op(int op)
{
    return (op < 8) ? 1 : 0;
}
7932
7933/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7934 then set condition code flags based on the result of the operation.
7935 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7936 to the high bit of T1.
7937 Returns zero if the opcode is valid. */
7938
7939static int
396e467c 7940gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
9ee6e8bb
PB
7941{
7942 int logic_cc;
7943
7944 logic_cc = 0;
7945 switch (op) {
7946 case 0: /* and */
396e467c 7947 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
7948 logic_cc = conds;
7949 break;
7950 case 1: /* bic */
f669df27 7951 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
7952 logic_cc = conds;
7953 break;
7954 case 2: /* orr */
396e467c 7955 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
7956 logic_cc = conds;
7957 break;
7958 case 3: /* orn */
29501f1b 7959 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
7960 logic_cc = conds;
7961 break;
7962 case 4: /* eor */
396e467c 7963 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
7964 logic_cc = conds;
7965 break;
7966 case 8: /* add */
7967 if (conds)
72485ec4 7968 gen_add_CC(t0, t0, t1);
9ee6e8bb 7969 else
396e467c 7970 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
7971 break;
7972 case 10: /* adc */
7973 if (conds)
49b4c31e 7974 gen_adc_CC(t0, t0, t1);
9ee6e8bb 7975 else
396e467c 7976 gen_adc(t0, t1);
9ee6e8bb
PB
7977 break;
7978 case 11: /* sbc */
2de68a49
RH
7979 if (conds) {
7980 gen_sbc_CC(t0, t0, t1);
7981 } else {
396e467c 7982 gen_sub_carry(t0, t0, t1);
2de68a49 7983 }
9ee6e8bb
PB
7984 break;
7985 case 13: /* sub */
7986 if (conds)
72485ec4 7987 gen_sub_CC(t0, t0, t1);
9ee6e8bb 7988 else
396e467c 7989 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
7990 break;
7991 case 14: /* rsb */
7992 if (conds)
72485ec4 7993 gen_sub_CC(t0, t1, t0);
9ee6e8bb 7994 else
396e467c 7995 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
7996 break;
7997 default: /* 5, 6, 7, 9, 12, 15. */
7998 return 1;
7999 }
8000 if (logic_cc) {
396e467c 8001 gen_logic_CC(t0);
9ee6e8bb 8002 if (shifter_out)
396e467c 8003 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8004 }
8005 return 0;
8006}
8007
8008/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8009 is not legal. */
0ecb72a5 8010static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8011{
b0109805 8012 uint32_t insn, imm, shift, offset;
9ee6e8bb 8013 uint32_t rd, rn, rm, rs;
b26eefb6 8014 TCGv tmp;
6ddbc6e4
PB
8015 TCGv tmp2;
8016 TCGv tmp3;
b0109805 8017 TCGv addr;
a7812ae4 8018 TCGv_i64 tmp64;
9ee6e8bb
PB
8019 int op;
8020 int shiftop;
8021 int conds;
8022 int logic_cc;
8023
8024 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8025 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8026 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8027 16-bit instructions to get correct prefetch abort behavior. */
8028 insn = insn_hw1;
8029 if ((insn & (1 << 12)) == 0) {
be5e7a76 8030 ARCH(5);
9ee6e8bb
PB
8031 /* Second half of blx. */
8032 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8033 tmp = load_reg(s, 14);
8034 tcg_gen_addi_i32(tmp, tmp, offset);
8035 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8036
7d1b0095 8037 tmp2 = tcg_temp_new_i32();
b0109805 8038 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8039 store_reg(s, 14, tmp2);
8040 gen_bx(s, tmp);
9ee6e8bb
PB
8041 return 0;
8042 }
8043 if (insn & (1 << 11)) {
8044 /* Second half of bl. */
8045 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8046 tmp = load_reg(s, 14);
6a0d8a1d 8047 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8048
7d1b0095 8049 tmp2 = tcg_temp_new_i32();
b0109805 8050 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8051 store_reg(s, 14, tmp2);
8052 gen_bx(s, tmp);
9ee6e8bb
PB
8053 return 0;
8054 }
8055 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8056 /* Instruction spans a page boundary. Implement it as two
8057 16-bit instructions in case the second half causes an
8058 prefetch abort. */
8059 offset = ((int32_t)insn << 21) >> 9;
396e467c 8060 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8061 return 0;
8062 }
8063 /* Fall through to 32-bit decode. */
8064 }
8065
d31dd73e 8066 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8067 s->pc += 2;
8068 insn |= (uint32_t)insn_hw1 << 16;
8069
8070 if ((insn & 0xf800e800) != 0xf000e800) {
8071 ARCH(6T2);
8072 }
8073
8074 rn = (insn >> 16) & 0xf;
8075 rs = (insn >> 12) & 0xf;
8076 rd = (insn >> 8) & 0xf;
8077 rm = insn & 0xf;
8078 switch ((insn >> 25) & 0xf) {
8079 case 0: case 1: case 2: case 3:
8080 /* 16-bit instructions. Should never happen. */
8081 abort();
8082 case 4:
8083 if (insn & (1 << 22)) {
8084 /* Other load/store, table branch. */
8085 if (insn & 0x01200000) {
8086 /* Load/store doubleword. */
8087 if (rn == 15) {
7d1b0095 8088 addr = tcg_temp_new_i32();
b0109805 8089 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8090 } else {
b0109805 8091 addr = load_reg(s, rn);
9ee6e8bb
PB
8092 }
8093 offset = (insn & 0xff) * 4;
8094 if ((insn & (1 << 23)) == 0)
8095 offset = -offset;
8096 if (insn & (1 << 24)) {
b0109805 8097 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8098 offset = 0;
8099 }
8100 if (insn & (1 << 20)) {
8101 /* ldrd */
b0109805
PB
8102 tmp = gen_ld32(addr, IS_USER(s));
8103 store_reg(s, rs, tmp);
8104 tcg_gen_addi_i32(addr, addr, 4);
8105 tmp = gen_ld32(addr, IS_USER(s));
8106 store_reg(s, rd, tmp);
9ee6e8bb
PB
8107 } else {
8108 /* strd */
b0109805
PB
8109 tmp = load_reg(s, rs);
8110 gen_st32(tmp, addr, IS_USER(s));
8111 tcg_gen_addi_i32(addr, addr, 4);
8112 tmp = load_reg(s, rd);
8113 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
8114 }
8115 if (insn & (1 << 21)) {
8116 /* Base writeback. */
8117 if (rn == 15)
8118 goto illegal_op;
b0109805
PB
8119 tcg_gen_addi_i32(addr, addr, offset - 4);
8120 store_reg(s, rn, addr);
8121 } else {
7d1b0095 8122 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8123 }
8124 } else if ((insn & (1 << 23)) == 0) {
8125 /* Load/store exclusive word. */
3174f8e9 8126 addr = tcg_temp_local_new();
98a46317 8127 load_reg_var(s, addr, rn);
426f5abc 8128 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8129 if (insn & (1 << 20)) {
426f5abc 8130 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8131 } else {
426f5abc 8132 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8133 }
3174f8e9 8134 tcg_temp_free(addr);
9ee6e8bb
PB
8135 } else if ((insn & (1 << 6)) == 0) {
8136 /* Table Branch. */
8137 if (rn == 15) {
7d1b0095 8138 addr = tcg_temp_new_i32();
b0109805 8139 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8140 } else {
b0109805 8141 addr = load_reg(s, rn);
9ee6e8bb 8142 }
b26eefb6 8143 tmp = load_reg(s, rm);
b0109805 8144 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8145 if (insn & (1 << 4)) {
8146 /* tbh */
b0109805 8147 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8148 tcg_temp_free_i32(tmp);
b0109805 8149 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 8150 } else { /* tbb */
7d1b0095 8151 tcg_temp_free_i32(tmp);
b0109805 8152 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 8153 }
7d1b0095 8154 tcg_temp_free_i32(addr);
b0109805
PB
8155 tcg_gen_shli_i32(tmp, tmp, 1);
8156 tcg_gen_addi_i32(tmp, tmp, s->pc);
8157 store_reg(s, 15, tmp);
9ee6e8bb
PB
8158 } else {
8159 /* Load/store exclusive byte/halfword/doubleword. */
426f5abc 8160 ARCH(7);
9ee6e8bb 8161 op = (insn >> 4) & 0x3;
426f5abc
PB
8162 if (op == 2) {
8163 goto illegal_op;
8164 }
3174f8e9 8165 addr = tcg_temp_local_new();
98a46317 8166 load_reg_var(s, addr, rn);
9ee6e8bb 8167 if (insn & (1 << 20)) {
426f5abc 8168 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8169 } else {
426f5abc 8170 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8171 }
3174f8e9 8172 tcg_temp_free(addr);
9ee6e8bb
PB
8173 }
8174 } else {
8175 /* Load/store multiple, RFE, SRS. */
8176 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8177 /* Not available in user mode. */
b0109805 8178 if (IS_USER(s))
9ee6e8bb
PB
8179 goto illegal_op;
8180 if (insn & (1 << 20)) {
8181 /* rfe */
b0109805
PB
8182 addr = load_reg(s, rn);
8183 if ((insn & (1 << 24)) == 0)
8184 tcg_gen_addi_i32(addr, addr, -8);
8185 /* Load PC into tmp and CPSR into tmp2. */
8186 tmp = gen_ld32(addr, 0);
8187 tcg_gen_addi_i32(addr, addr, 4);
8188 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
8189 if (insn & (1 << 21)) {
8190 /* Base writeback. */
b0109805
PB
8191 if (insn & (1 << 24)) {
8192 tcg_gen_addi_i32(addr, addr, 4);
8193 } else {
8194 tcg_gen_addi_i32(addr, addr, -4);
8195 }
8196 store_reg(s, rn, addr);
8197 } else {
7d1b0095 8198 tcg_temp_free_i32(addr);
9ee6e8bb 8199 }
b0109805 8200 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8201 } else {
8202 /* srs */
8203 op = (insn & 0x1f);
7d1b0095 8204 addr = tcg_temp_new_i32();
39ea3d4e
PM
8205 tmp = tcg_const_i32(op);
8206 gen_helper_get_r13_banked(addr, cpu_env, tmp);
8207 tcg_temp_free_i32(tmp);
9ee6e8bb 8208 if ((insn & (1 << 24)) == 0) {
b0109805 8209 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 8210 }
b0109805
PB
8211 tmp = load_reg(s, 14);
8212 gen_st32(tmp, addr, 0);
8213 tcg_gen_addi_i32(addr, addr, 4);
7d1b0095 8214 tmp = tcg_temp_new_i32();
9ef39277 8215 gen_helper_cpsr_read(tmp, cpu_env);
b0109805 8216 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
8217 if (insn & (1 << 21)) {
8218 if ((insn & (1 << 24)) == 0) {
b0109805 8219 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 8220 } else {
b0109805 8221 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8222 }
39ea3d4e
PM
8223 tmp = tcg_const_i32(op);
8224 gen_helper_set_r13_banked(cpu_env, tmp, addr);
8225 tcg_temp_free_i32(tmp);
b0109805 8226 } else {
7d1b0095 8227 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8228 }
8229 }
8230 } else {
5856d44e
YO
8231 int i, loaded_base = 0;
8232 TCGv loaded_var;
9ee6e8bb 8233 /* Load/store multiple. */
b0109805 8234 addr = load_reg(s, rn);
9ee6e8bb
PB
8235 offset = 0;
8236 for (i = 0; i < 16; i++) {
8237 if (insn & (1 << i))
8238 offset += 4;
8239 }
8240 if (insn & (1 << 24)) {
b0109805 8241 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8242 }
8243
5856d44e 8244 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
8245 for (i = 0; i < 16; i++) {
8246 if ((insn & (1 << i)) == 0)
8247 continue;
8248 if (insn & (1 << 20)) {
8249 /* Load. */
b0109805 8250 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 8251 if (i == 15) {
b0109805 8252 gen_bx(s, tmp);
5856d44e
YO
8253 } else if (i == rn) {
8254 loaded_var = tmp;
8255 loaded_base = 1;
9ee6e8bb 8256 } else {
b0109805 8257 store_reg(s, i, tmp);
9ee6e8bb
PB
8258 }
8259 } else {
8260 /* Store. */
b0109805
PB
8261 tmp = load_reg(s, i);
8262 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 8263 }
b0109805 8264 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8265 }
5856d44e
YO
8266 if (loaded_base) {
8267 store_reg(s, rn, loaded_var);
8268 }
9ee6e8bb
PB
8269 if (insn & (1 << 21)) {
8270 /* Base register writeback. */
8271 if (insn & (1 << 24)) {
b0109805 8272 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8273 }
8274 /* Fault if writeback register is in register list. */
8275 if (insn & (1 << rn))
8276 goto illegal_op;
b0109805
PB
8277 store_reg(s, rn, addr);
8278 } else {
7d1b0095 8279 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8280 }
8281 }
8282 }
8283 break;
2af9ab77
JB
8284 case 5:
8285
9ee6e8bb 8286 op = (insn >> 21) & 0xf;
2af9ab77
JB
8287 if (op == 6) {
8288 /* Halfword pack. */
8289 tmp = load_reg(s, rn);
8290 tmp2 = load_reg(s, rm);
8291 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8292 if (insn & (1 << 5)) {
8293 /* pkhtb */
8294 if (shift == 0)
8295 shift = 31;
8296 tcg_gen_sari_i32(tmp2, tmp2, shift);
8297 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8298 tcg_gen_ext16u_i32(tmp2, tmp2);
8299 } else {
8300 /* pkhbt */
8301 if (shift)
8302 tcg_gen_shli_i32(tmp2, tmp2, shift);
8303 tcg_gen_ext16u_i32(tmp, tmp);
8304 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8305 }
8306 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8307 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8308 store_reg(s, rd, tmp);
8309 } else {
2af9ab77
JB
8310 /* Data processing register constant shift. */
8311 if (rn == 15) {
7d1b0095 8312 tmp = tcg_temp_new_i32();
2af9ab77
JB
8313 tcg_gen_movi_i32(tmp, 0);
8314 } else {
8315 tmp = load_reg(s, rn);
8316 }
8317 tmp2 = load_reg(s, rm);
8318
8319 shiftop = (insn >> 4) & 3;
8320 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8321 conds = (insn & (1 << 20)) != 0;
8322 logic_cc = (conds && thumb2_logic_op(op));
8323 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8324 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8325 goto illegal_op;
7d1b0095 8326 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8327 if (rd != 15) {
8328 store_reg(s, rd, tmp);
8329 } else {
7d1b0095 8330 tcg_temp_free_i32(tmp);
2af9ab77 8331 }
3174f8e9 8332 }
9ee6e8bb
PB
8333 break;
8334 case 13: /* Misc data processing. */
8335 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8336 if (op < 4 && (insn & 0xf000) != 0xf000)
8337 goto illegal_op;
8338 switch (op) {
8339 case 0: /* Register controlled shift. */
8984bd2e
PB
8340 tmp = load_reg(s, rn);
8341 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8342 if ((insn & 0x70) != 0)
8343 goto illegal_op;
8344 op = (insn >> 21) & 3;
8984bd2e
PB
8345 logic_cc = (insn & (1 << 20)) != 0;
8346 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8347 if (logic_cc)
8348 gen_logic_CC(tmp);
21aeb343 8349 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8350 break;
8351 case 1: /* Sign/zero extend. */
5e3f878a 8352 tmp = load_reg(s, rm);
9ee6e8bb 8353 shift = (insn >> 4) & 3;
1301f322 8354 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8355 rotate, a shift is sufficient. */
8356 if (shift != 0)
f669df27 8357 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8358 op = (insn >> 20) & 7;
8359 switch (op) {
5e3f878a
PB
8360 case 0: gen_sxth(tmp); break;
8361 case 1: gen_uxth(tmp); break;
8362 case 2: gen_sxtb16(tmp); break;
8363 case 3: gen_uxtb16(tmp); break;
8364 case 4: gen_sxtb(tmp); break;
8365 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8366 default: goto illegal_op;
8367 }
8368 if (rn != 15) {
5e3f878a 8369 tmp2 = load_reg(s, rn);
9ee6e8bb 8370 if ((op >> 1) == 1) {
5e3f878a 8371 gen_add16(tmp, tmp2);
9ee6e8bb 8372 } else {
5e3f878a 8373 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8374 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8375 }
8376 }
5e3f878a 8377 store_reg(s, rd, tmp);
9ee6e8bb
PB
8378 break;
8379 case 2: /* SIMD add/subtract. */
8380 op = (insn >> 20) & 7;
8381 shift = (insn >> 4) & 7;
8382 if ((op & 3) == 3 || (shift & 3) == 3)
8383 goto illegal_op;
6ddbc6e4
PB
8384 tmp = load_reg(s, rn);
8385 tmp2 = load_reg(s, rm);
8386 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8387 tcg_temp_free_i32(tmp2);
6ddbc6e4 8388 store_reg(s, rd, tmp);
9ee6e8bb
PB
8389 break;
8390 case 3: /* Other data processing. */
8391 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8392 if (op < 4) {
8393 /* Saturating add/subtract. */
d9ba4830
PB
8394 tmp = load_reg(s, rn);
8395 tmp2 = load_reg(s, rm);
9ee6e8bb 8396 if (op & 1)
9ef39277 8397 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8398 if (op & 2)
9ef39277 8399 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8400 else
9ef39277 8401 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8402 tcg_temp_free_i32(tmp2);
9ee6e8bb 8403 } else {
d9ba4830 8404 tmp = load_reg(s, rn);
9ee6e8bb
PB
8405 switch (op) {
8406 case 0x0a: /* rbit */
d9ba4830 8407 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8408 break;
8409 case 0x08: /* rev */
66896cb8 8410 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8411 break;
8412 case 0x09: /* rev16 */
d9ba4830 8413 gen_rev16(tmp);
9ee6e8bb
PB
8414 break;
8415 case 0x0b: /* revsh */
d9ba4830 8416 gen_revsh(tmp);
9ee6e8bb
PB
8417 break;
8418 case 0x10: /* sel */
d9ba4830 8419 tmp2 = load_reg(s, rm);
7d1b0095 8420 tmp3 = tcg_temp_new_i32();
0ecb72a5 8421 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8422 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8423 tcg_temp_free_i32(tmp3);
8424 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8425 break;
8426 case 0x18: /* clz */
d9ba4830 8427 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8428 break;
8429 default:
8430 goto illegal_op;
8431 }
8432 }
d9ba4830 8433 store_reg(s, rd, tmp);
9ee6e8bb
PB
8434 break;
8435 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8436 op = (insn >> 4) & 0xf;
d9ba4830
PB
8437 tmp = load_reg(s, rn);
8438 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8439 switch ((insn >> 20) & 7) {
8440 case 0: /* 32 x 32 -> 32 */
d9ba4830 8441 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8442 tcg_temp_free_i32(tmp2);
9ee6e8bb 8443 if (rs != 15) {
d9ba4830 8444 tmp2 = load_reg(s, rs);
9ee6e8bb 8445 if (op)
d9ba4830 8446 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8447 else
d9ba4830 8448 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8449 tcg_temp_free_i32(tmp2);
9ee6e8bb 8450 }
9ee6e8bb
PB
8451 break;
8452 case 1: /* 16 x 16 -> 32 */
d9ba4830 8453 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8454 tcg_temp_free_i32(tmp2);
9ee6e8bb 8455 if (rs != 15) {
d9ba4830 8456 tmp2 = load_reg(s, rs);
9ef39277 8457 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8458 tcg_temp_free_i32(tmp2);
9ee6e8bb 8459 }
9ee6e8bb
PB
8460 break;
8461 case 2: /* Dual multiply add. */
8462 case 4: /* Dual multiply subtract. */
8463 if (op)
d9ba4830
PB
8464 gen_swap_half(tmp2);
8465 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8466 if (insn & (1 << 22)) {
e1d177b9 8467 /* This subtraction cannot overflow. */
d9ba4830 8468 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8469 } else {
e1d177b9
PM
8470 /* This addition cannot overflow 32 bits;
8471 * however it may overflow considered as a signed
8472 * operation, in which case we must set the Q flag.
8473 */
9ef39277 8474 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8475 }
7d1b0095 8476 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8477 if (rs != 15)
8478 {
d9ba4830 8479 tmp2 = load_reg(s, rs);
9ef39277 8480 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8481 tcg_temp_free_i32(tmp2);
9ee6e8bb 8482 }
9ee6e8bb
PB
8483 break;
8484 case 3: /* 32 * 16 -> 32msb */
8485 if (op)
d9ba4830 8486 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8487 else
d9ba4830 8488 gen_sxth(tmp2);
a7812ae4
PB
8489 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8490 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8491 tmp = tcg_temp_new_i32();
a7812ae4 8492 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8493 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8494 if (rs != 15)
8495 {
d9ba4830 8496 tmp2 = load_reg(s, rs);
9ef39277 8497 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8498 tcg_temp_free_i32(tmp2);
9ee6e8bb 8499 }
9ee6e8bb 8500 break;
838fa72d
AJ
8501 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8502 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8503 if (rs != 15) {
838fa72d
AJ
8504 tmp = load_reg(s, rs);
8505 if (insn & (1 << 20)) {
8506 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8507 } else {
838fa72d 8508 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8509 }
2c0262af 8510 }
838fa72d
AJ
8511 if (insn & (1 << 4)) {
8512 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8513 }
8514 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8515 tmp = tcg_temp_new_i32();
838fa72d
AJ
8516 tcg_gen_trunc_i64_i32(tmp, tmp64);
8517 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8518 break;
8519 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8520 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8521 tcg_temp_free_i32(tmp2);
9ee6e8bb 8522 if (rs != 15) {
d9ba4830
PB
8523 tmp2 = load_reg(s, rs);
8524 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8525 tcg_temp_free_i32(tmp2);
5fd46862 8526 }
9ee6e8bb 8527 break;
2c0262af 8528 }
d9ba4830 8529 store_reg(s, rd, tmp);
2c0262af 8530 break;
9ee6e8bb
PB
8531 case 6: case 7: /* 64-bit multiply, Divide. */
8532 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8533 tmp = load_reg(s, rn);
8534 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8535 if ((op & 0x50) == 0x10) {
8536 /* sdiv, udiv */
47789990 8537 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8538 goto illegal_op;
47789990 8539 }
9ee6e8bb 8540 if (op & 0x20)
5e3f878a 8541 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8542 else
5e3f878a 8543 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8544 tcg_temp_free_i32(tmp2);
5e3f878a 8545 store_reg(s, rd, tmp);
9ee6e8bb
PB
8546 } else if ((op & 0xe) == 0xc) {
8547 /* Dual multiply accumulate long. */
8548 if (op & 1)
5e3f878a
PB
8549 gen_swap_half(tmp2);
8550 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8551 if (op & 0x10) {
5e3f878a 8552 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8553 } else {
5e3f878a 8554 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8555 }
7d1b0095 8556 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8557 /* BUGFIX */
8558 tmp64 = tcg_temp_new_i64();
8559 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8560 tcg_temp_free_i32(tmp);
a7812ae4
PB
8561 gen_addq(s, tmp64, rs, rd);
8562 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8563 tcg_temp_free_i64(tmp64);
2c0262af 8564 } else {
9ee6e8bb
PB
8565 if (op & 0x20) {
8566 /* Unsigned 64-bit multiply */
a7812ae4 8567 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8568 } else {
9ee6e8bb
PB
8569 if (op & 8) {
8570 /* smlalxy */
5e3f878a 8571 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8572 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8573 tmp64 = tcg_temp_new_i64();
8574 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8575 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8576 } else {
8577 /* Signed 64-bit multiply */
a7812ae4 8578 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8579 }
b5ff1b31 8580 }
9ee6e8bb
PB
8581 if (op & 4) {
8582 /* umaal */
a7812ae4
PB
8583 gen_addq_lo(s, tmp64, rs);
8584 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8585 } else if (op & 0x40) {
8586 /* 64-bit accumulate. */
a7812ae4 8587 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8588 }
a7812ae4 8589 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8590 tcg_temp_free_i64(tmp64);
5fd46862 8591 }
2c0262af 8592 break;
9ee6e8bb
PB
8593 }
8594 break;
8595 case 6: case 7: case 14: case 15:
8596 /* Coprocessor. */
8597 if (((insn >> 24) & 3) == 3) {
8598 /* Translate into the equivalent ARM encoding. */
f06053e3 8599 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8600 if (disas_neon_data_insn(env, s, insn))
8601 goto illegal_op;
8602 } else {
8603 if (insn & (1 << 28))
8604 goto illegal_op;
8605 if (disas_coproc_insn (env, s, insn))
8606 goto illegal_op;
8607 }
8608 break;
8609 case 8: case 9: case 10: case 11:
8610 if (insn & (1 << 15)) {
8611 /* Branches, misc control. */
8612 if (insn & 0x5000) {
8613 /* Unconditional branch. */
8614 /* signextend(hw1[10:0]) -> offset[:12]. */
8615 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8616 /* hw1[10:0] -> offset[11:1]. */
8617 offset |= (insn & 0x7ff) << 1;
8618 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8619 offset[24:22] already have the same value because of the
8620 sign extension above. */
8621 offset ^= ((~insn) & (1 << 13)) << 10;
8622 offset ^= ((~insn) & (1 << 11)) << 11;
8623
9ee6e8bb
PB
8624 if (insn & (1 << 14)) {
8625 /* Branch and link. */
3174f8e9 8626 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8627 }
3b46e624 8628
b0109805 8629 offset += s->pc;
9ee6e8bb
PB
8630 if (insn & (1 << 12)) {
8631 /* b/bl */
b0109805 8632 gen_jmp(s, offset);
9ee6e8bb
PB
8633 } else {
8634 /* blx */
b0109805 8635 offset &= ~(uint32_t)2;
be5e7a76 8636 /* thumb2 bx, no need to check */
b0109805 8637 gen_bx_im(s, offset);
2c0262af 8638 }
9ee6e8bb
PB
8639 } else if (((insn >> 23) & 7) == 7) {
8640 /* Misc control */
8641 if (insn & (1 << 13))
8642 goto illegal_op;
8643
8644 if (insn & (1 << 26)) {
8645 /* Secure monitor call (v6Z) */
8646 goto illegal_op; /* not implemented. */
2c0262af 8647 } else {
9ee6e8bb
PB
8648 op = (insn >> 20) & 7;
8649 switch (op) {
8650 case 0: /* msr cpsr. */
8651 if (IS_M(env)) {
8984bd2e
PB
8652 tmp = load_reg(s, rn);
8653 addr = tcg_const_i32(insn & 0xff);
8654 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8655 tcg_temp_free_i32(addr);
7d1b0095 8656 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8657 gen_lookup_tb(s);
8658 break;
8659 }
8660 /* fall through */
8661 case 1: /* msr spsr. */
8662 if (IS_M(env))
8663 goto illegal_op;
2fbac54b
FN
8664 tmp = load_reg(s, rn);
8665 if (gen_set_psr(s,
9ee6e8bb 8666 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8667 op == 1, tmp))
9ee6e8bb
PB
8668 goto illegal_op;
8669 break;
8670 case 2: /* cps, nop-hint. */
8671 if (((insn >> 8) & 7) == 0) {
8672 gen_nop_hint(s, insn & 0xff);
8673 }
8674 /* Implemented as NOP in user mode. */
8675 if (IS_USER(s))
8676 break;
8677 offset = 0;
8678 imm = 0;
8679 if (insn & (1 << 10)) {
8680 if (insn & (1 << 7))
8681 offset |= CPSR_A;
8682 if (insn & (1 << 6))
8683 offset |= CPSR_I;
8684 if (insn & (1 << 5))
8685 offset |= CPSR_F;
8686 if (insn & (1 << 9))
8687 imm = CPSR_A | CPSR_I | CPSR_F;
8688 }
8689 if (insn & (1 << 8)) {
8690 offset |= 0x1f;
8691 imm |= (insn & 0x1f);
8692 }
8693 if (offset) {
2fbac54b 8694 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8695 }
8696 break;
8697 case 3: /* Special control operations. */
426f5abc 8698 ARCH(7);
9ee6e8bb
PB
8699 op = (insn >> 4) & 0xf;
8700 switch (op) {
8701 case 2: /* clrex */
426f5abc 8702 gen_clrex(s);
9ee6e8bb
PB
8703 break;
8704 case 4: /* dsb */
8705 case 5: /* dmb */
8706 case 6: /* isb */
8707 /* These execute as NOPs. */
9ee6e8bb
PB
8708 break;
8709 default:
8710 goto illegal_op;
8711 }
8712 break;
8713 case 4: /* bxj */
8714 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8715 tmp = load_reg(s, rn);
8716 gen_bx(s, tmp);
9ee6e8bb
PB
8717 break;
8718 case 5: /* Exception return. */
b8b45b68
RV
8719 if (IS_USER(s)) {
8720 goto illegal_op;
8721 }
8722 if (rn != 14 || rd != 15) {
8723 goto illegal_op;
8724 }
8725 tmp = load_reg(s, rn);
8726 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8727 gen_exception_return(s, tmp);
8728 break;
9ee6e8bb 8729 case 6: /* mrs cpsr. */
7d1b0095 8730 tmp = tcg_temp_new_i32();
9ee6e8bb 8731 if (IS_M(env)) {
8984bd2e
PB
8732 addr = tcg_const_i32(insn & 0xff);
8733 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8734 tcg_temp_free_i32(addr);
9ee6e8bb 8735 } else {
9ef39277 8736 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8737 }
8984bd2e 8738 store_reg(s, rd, tmp);
9ee6e8bb
PB
8739 break;
8740 case 7: /* mrs spsr. */
8741 /* Not accessible in user mode. */
8742 if (IS_USER(s) || IS_M(env))
8743 goto illegal_op;
d9ba4830
PB
8744 tmp = load_cpu_field(spsr);
8745 store_reg(s, rd, tmp);
9ee6e8bb 8746 break;
2c0262af
FB
8747 }
8748 }
9ee6e8bb
PB
8749 } else {
8750 /* Conditional branch. */
8751 op = (insn >> 22) & 0xf;
8752 /* Generate a conditional jump to next instruction. */
8753 s->condlabel = gen_new_label();
d9ba4830 8754 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8755 s->condjmp = 1;
8756
8757 /* offset[11:1] = insn[10:0] */
8758 offset = (insn & 0x7ff) << 1;
8759 /* offset[17:12] = insn[21:16]. */
8760 offset |= (insn & 0x003f0000) >> 4;
8761 /* offset[31:20] = insn[26]. */
8762 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8763 /* offset[18] = insn[13]. */
8764 offset |= (insn & (1 << 13)) << 5;
8765 /* offset[19] = insn[11]. */
8766 offset |= (insn & (1 << 11)) << 8;
8767
8768 /* jump to the offset */
b0109805 8769 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8770 }
8771 } else {
8772 /* Data processing immediate. */
8773 if (insn & (1 << 25)) {
8774 if (insn & (1 << 24)) {
8775 if (insn & (1 << 20))
8776 goto illegal_op;
8777 /* Bitfield/Saturate. */
8778 op = (insn >> 21) & 7;
8779 imm = insn & 0x1f;
8780 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8781 if (rn == 15) {
7d1b0095 8782 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8783 tcg_gen_movi_i32(tmp, 0);
8784 } else {
8785 tmp = load_reg(s, rn);
8786 }
9ee6e8bb
PB
8787 switch (op) {
8788 case 2: /* Signed bitfield extract. */
8789 imm++;
8790 if (shift + imm > 32)
8791 goto illegal_op;
8792 if (imm < 32)
6ddbc6e4 8793 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8794 break;
8795 case 6: /* Unsigned bitfield extract. */
8796 imm++;
8797 if (shift + imm > 32)
8798 goto illegal_op;
8799 if (imm < 32)
6ddbc6e4 8800 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8801 break;
8802 case 3: /* Bitfield insert/clear. */
8803 if (imm < shift)
8804 goto illegal_op;
8805 imm = imm + 1 - shift;
8806 if (imm != 32) {
6ddbc6e4 8807 tmp2 = load_reg(s, rd);
d593c48e 8808 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8809 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8810 }
8811 break;
8812 case 7:
8813 goto illegal_op;
8814 default: /* Saturate. */
9ee6e8bb
PB
8815 if (shift) {
8816 if (op & 1)
6ddbc6e4 8817 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8818 else
6ddbc6e4 8819 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8820 }
6ddbc6e4 8821 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8822 if (op & 4) {
8823 /* Unsigned. */
9ee6e8bb 8824 if ((op & 1) && shift == 0)
9ef39277 8825 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8826 else
9ef39277 8827 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8828 } else {
9ee6e8bb 8829 /* Signed. */
9ee6e8bb 8830 if ((op & 1) && shift == 0)
9ef39277 8831 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8832 else
9ef39277 8833 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8834 }
b75263d6 8835 tcg_temp_free_i32(tmp2);
9ee6e8bb 8836 break;
2c0262af 8837 }
6ddbc6e4 8838 store_reg(s, rd, tmp);
9ee6e8bb
PB
8839 } else {
8840 imm = ((insn & 0x04000000) >> 15)
8841 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8842 if (insn & (1 << 22)) {
8843 /* 16-bit immediate. */
8844 imm |= (insn >> 4) & 0xf000;
8845 if (insn & (1 << 23)) {
8846 /* movt */
5e3f878a 8847 tmp = load_reg(s, rd);
86831435 8848 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 8849 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 8850 } else {
9ee6e8bb 8851 /* movw */
7d1b0095 8852 tmp = tcg_temp_new_i32();
5e3f878a 8853 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
8854 }
8855 } else {
9ee6e8bb
PB
8856 /* Add/sub 12-bit immediate. */
8857 if (rn == 15) {
b0109805 8858 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 8859 if (insn & (1 << 23))
b0109805 8860 offset -= imm;
9ee6e8bb 8861 else
b0109805 8862 offset += imm;
7d1b0095 8863 tmp = tcg_temp_new_i32();
5e3f878a 8864 tcg_gen_movi_i32(tmp, offset);
2c0262af 8865 } else {
5e3f878a 8866 tmp = load_reg(s, rn);
9ee6e8bb 8867 if (insn & (1 << 23))
5e3f878a 8868 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 8869 else
5e3f878a 8870 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 8871 }
9ee6e8bb 8872 }
5e3f878a 8873 store_reg(s, rd, tmp);
191abaa2 8874 }
9ee6e8bb
PB
8875 } else {
8876 int shifter_out = 0;
8877 /* modified 12-bit immediate. */
8878 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8879 imm = (insn & 0xff);
8880 switch (shift) {
8881 case 0: /* XY */
8882 /* Nothing to do. */
8883 break;
8884 case 1: /* 00XY00XY */
8885 imm |= imm << 16;
8886 break;
8887 case 2: /* XY00XY00 */
8888 imm |= imm << 16;
8889 imm <<= 8;
8890 break;
8891 case 3: /* XYXYXYXY */
8892 imm |= imm << 16;
8893 imm |= imm << 8;
8894 break;
8895 default: /* Rotated constant. */
8896 shift = (shift << 1) | (imm >> 7);
8897 imm |= 0x80;
8898 imm = imm << (32 - shift);
8899 shifter_out = 1;
8900 break;
b5ff1b31 8901 }
7d1b0095 8902 tmp2 = tcg_temp_new_i32();
3174f8e9 8903 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 8904 rn = (insn >> 16) & 0xf;
3174f8e9 8905 if (rn == 15) {
7d1b0095 8906 tmp = tcg_temp_new_i32();
3174f8e9
FN
8907 tcg_gen_movi_i32(tmp, 0);
8908 } else {
8909 tmp = load_reg(s, rn);
8910 }
9ee6e8bb
PB
8911 op = (insn >> 21) & 0xf;
8912 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 8913 shifter_out, tmp, tmp2))
9ee6e8bb 8914 goto illegal_op;
7d1b0095 8915 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8916 rd = (insn >> 8) & 0xf;
8917 if (rd != 15) {
3174f8e9
FN
8918 store_reg(s, rd, tmp);
8919 } else {
7d1b0095 8920 tcg_temp_free_i32(tmp);
2c0262af 8921 }
2c0262af 8922 }
9ee6e8bb
PB
8923 }
8924 break;
8925 case 12: /* Load/store single data item. */
8926 {
8927 int postinc = 0;
8928 int writeback = 0;
b0109805 8929 int user;
9ee6e8bb
PB
8930 if ((insn & 0x01100000) == 0x01000000) {
8931 if (disas_neon_ls_insn(env, s, insn))
c1713132 8932 goto illegal_op;
9ee6e8bb
PB
8933 break;
8934 }
a2fdc890
PM
8935 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
8936 if (rs == 15) {
8937 if (!(insn & (1 << 20))) {
8938 goto illegal_op;
8939 }
8940 if (op != 2) {
8941 /* Byte or halfword load space with dest == r15 : memory hints.
8942 * Catch them early so we don't emit pointless addressing code.
8943 * This space is a mix of:
8944 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
8945 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
8946 * cores)
8947 * unallocated hints, which must be treated as NOPs
8948 * UNPREDICTABLE space, which we NOP or UNDEF depending on
8949 * which is easiest for the decoding logic
8950 * Some space which must UNDEF
8951 */
8952 int op1 = (insn >> 23) & 3;
8953 int op2 = (insn >> 6) & 0x3f;
8954 if (op & 2) {
8955 goto illegal_op;
8956 }
8957 if (rn == 15) {
02afbf64
PM
8958 /* UNPREDICTABLE, unallocated hint or
8959 * PLD/PLDW/PLI (literal)
8960 */
a2fdc890
PM
8961 return 0;
8962 }
8963 if (op1 & 1) {
02afbf64 8964 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8965 }
8966 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 8967 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
8968 }
8969 /* UNDEF space, or an UNPREDICTABLE */
8970 return 1;
8971 }
8972 }
b0109805 8973 user = IS_USER(s);
9ee6e8bb 8974 if (rn == 15) {
7d1b0095 8975 addr = tcg_temp_new_i32();
9ee6e8bb
PB
8976 /* PC relative. */
8977 /* s->pc has already been incremented by 4. */
8978 imm = s->pc & 0xfffffffc;
8979 if (insn & (1 << 23))
8980 imm += insn & 0xfff;
8981 else
8982 imm -= insn & 0xfff;
b0109805 8983 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 8984 } else {
b0109805 8985 addr = load_reg(s, rn);
9ee6e8bb
PB
8986 if (insn & (1 << 23)) {
8987 /* Positive offset. */
8988 imm = insn & 0xfff;
b0109805 8989 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 8990 } else {
9ee6e8bb 8991 imm = insn & 0xff;
2a0308c5
PM
8992 switch ((insn >> 8) & 0xf) {
8993 case 0x0: /* Shifted Register. */
9ee6e8bb 8994 shift = (insn >> 4) & 0xf;
2a0308c5
PM
8995 if (shift > 3) {
8996 tcg_temp_free_i32(addr);
18c9b560 8997 goto illegal_op;
2a0308c5 8998 }
b26eefb6 8999 tmp = load_reg(s, rm);
9ee6e8bb 9000 if (shift)
b26eefb6 9001 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9002 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9003 tcg_temp_free_i32(tmp);
9ee6e8bb 9004 break;
2a0308c5 9005 case 0xc: /* Negative offset. */
b0109805 9006 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9007 break;
2a0308c5 9008 case 0xe: /* User privilege. */
b0109805
PB
9009 tcg_gen_addi_i32(addr, addr, imm);
9010 user = 1;
9ee6e8bb 9011 break;
2a0308c5 9012 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9013 imm = -imm;
9014 /* Fall through. */
2a0308c5 9015 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9016 postinc = 1;
9017 writeback = 1;
9018 break;
2a0308c5 9019 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9020 imm = -imm;
9021 /* Fall through. */
2a0308c5 9022 case 0xf: /* Pre-increment. */
b0109805 9023 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9024 writeback = 1;
9025 break;
9026 default:
2a0308c5 9027 tcg_temp_free_i32(addr);
b7bcbe95 9028 goto illegal_op;
9ee6e8bb
PB
9029 }
9030 }
9031 }
9ee6e8bb
PB
9032 if (insn & (1 << 20)) {
9033 /* Load. */
a2fdc890
PM
9034 switch (op) {
9035 case 0: tmp = gen_ld8u(addr, user); break;
9036 case 4: tmp = gen_ld8s(addr, user); break;
9037 case 1: tmp = gen_ld16u(addr, user); break;
9038 case 5: tmp = gen_ld16s(addr, user); break;
9039 case 2: tmp = gen_ld32(addr, user); break;
2a0308c5
PM
9040 default:
9041 tcg_temp_free_i32(addr);
9042 goto illegal_op;
a2fdc890
PM
9043 }
9044 if (rs == 15) {
9045 gen_bx(s, tmp);
9ee6e8bb 9046 } else {
a2fdc890 9047 store_reg(s, rs, tmp);
9ee6e8bb
PB
9048 }
9049 } else {
9050 /* Store. */
b0109805 9051 tmp = load_reg(s, rs);
9ee6e8bb 9052 switch (op) {
b0109805
PB
9053 case 0: gen_st8(tmp, addr, user); break;
9054 case 1: gen_st16(tmp, addr, user); break;
9055 case 2: gen_st32(tmp, addr, user); break;
2a0308c5
PM
9056 default:
9057 tcg_temp_free_i32(addr);
9058 goto illegal_op;
b7bcbe95 9059 }
2c0262af 9060 }
9ee6e8bb 9061 if (postinc)
b0109805
PB
9062 tcg_gen_addi_i32(addr, addr, imm);
9063 if (writeback) {
9064 store_reg(s, rn, addr);
9065 } else {
7d1b0095 9066 tcg_temp_free_i32(addr);
b0109805 9067 }
9ee6e8bb
PB
9068 }
9069 break;
9070 default:
9071 goto illegal_op;
2c0262af 9072 }
9ee6e8bb
PB
9073 return 0;
9074illegal_op:
9075 return 1;
2c0262af
FB
9076}
9077
0ecb72a5 9078static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9079{
9080 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9081 int32_t offset;
9082 int i;
b26eefb6 9083 TCGv tmp;
d9ba4830 9084 TCGv tmp2;
b0109805 9085 TCGv addr;
99c475ab 9086
9ee6e8bb
PB
9087 if (s->condexec_mask) {
9088 cond = s->condexec_cond;
bedd2912
JB
9089 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9090 s->condlabel = gen_new_label();
9091 gen_test_cc(cond ^ 1, s->condlabel);
9092 s->condjmp = 1;
9093 }
9ee6e8bb
PB
9094 }
9095
d31dd73e 9096 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9097 s->pc += 2;
b5ff1b31 9098
99c475ab
FB
9099 switch (insn >> 12) {
9100 case 0: case 1:
396e467c 9101
99c475ab
FB
9102 rd = insn & 7;
9103 op = (insn >> 11) & 3;
9104 if (op == 3) {
9105 /* add/subtract */
9106 rn = (insn >> 3) & 7;
396e467c 9107 tmp = load_reg(s, rn);
99c475ab
FB
9108 if (insn & (1 << 10)) {
9109 /* immediate */
7d1b0095 9110 tmp2 = tcg_temp_new_i32();
396e467c 9111 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9112 } else {
9113 /* reg */
9114 rm = (insn >> 6) & 7;
396e467c 9115 tmp2 = load_reg(s, rm);
99c475ab 9116 }
9ee6e8bb
PB
9117 if (insn & (1 << 9)) {
9118 if (s->condexec_mask)
396e467c 9119 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9120 else
72485ec4 9121 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9122 } else {
9123 if (s->condexec_mask)
396e467c 9124 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9125 else
72485ec4 9126 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9127 }
7d1b0095 9128 tcg_temp_free_i32(tmp2);
396e467c 9129 store_reg(s, rd, tmp);
99c475ab
FB
9130 } else {
9131 /* shift immediate */
9132 rm = (insn >> 3) & 7;
9133 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9134 tmp = load_reg(s, rm);
9135 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9136 if (!s->condexec_mask)
9137 gen_logic_CC(tmp);
9138 store_reg(s, rd, tmp);
99c475ab
FB
9139 }
9140 break;
9141 case 2: case 3:
9142 /* arithmetic large immediate */
9143 op = (insn >> 11) & 3;
9144 rd = (insn >> 8) & 0x7;
396e467c 9145 if (op == 0) { /* mov */
7d1b0095 9146 tmp = tcg_temp_new_i32();
396e467c 9147 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9148 if (!s->condexec_mask)
396e467c
FN
9149 gen_logic_CC(tmp);
9150 store_reg(s, rd, tmp);
9151 } else {
9152 tmp = load_reg(s, rd);
7d1b0095 9153 tmp2 = tcg_temp_new_i32();
396e467c
FN
9154 tcg_gen_movi_i32(tmp2, insn & 0xff);
9155 switch (op) {
9156 case 1: /* cmp */
72485ec4 9157 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9158 tcg_temp_free_i32(tmp);
9159 tcg_temp_free_i32(tmp2);
396e467c
FN
9160 break;
9161 case 2: /* add */
9162 if (s->condexec_mask)
9163 tcg_gen_add_i32(tmp, tmp, tmp2);
9164 else
72485ec4 9165 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9166 tcg_temp_free_i32(tmp2);
396e467c
FN
9167 store_reg(s, rd, tmp);
9168 break;
9169 case 3: /* sub */
9170 if (s->condexec_mask)
9171 tcg_gen_sub_i32(tmp, tmp, tmp2);
9172 else
72485ec4 9173 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9174 tcg_temp_free_i32(tmp2);
396e467c
FN
9175 store_reg(s, rd, tmp);
9176 break;
9177 }
99c475ab 9178 }
99c475ab
FB
9179 break;
9180 case 4:
9181 if (insn & (1 << 11)) {
9182 rd = (insn >> 8) & 7;
5899f386
FB
9183 /* load pc-relative. Bit 1 of PC is ignored. */
9184 val = s->pc + 2 + ((insn & 0xff) * 4);
9185 val &= ~(uint32_t)2;
7d1b0095 9186 addr = tcg_temp_new_i32();
b0109805
PB
9187 tcg_gen_movi_i32(addr, val);
9188 tmp = gen_ld32(addr, IS_USER(s));
7d1b0095 9189 tcg_temp_free_i32(addr);
b0109805 9190 store_reg(s, rd, tmp);
99c475ab
FB
9191 break;
9192 }
9193 if (insn & (1 << 10)) {
9194 /* data processing extended or blx */
9195 rd = (insn & 7) | ((insn >> 4) & 8);
9196 rm = (insn >> 3) & 0xf;
9197 op = (insn >> 8) & 3;
9198 switch (op) {
9199 case 0: /* add */
396e467c
FN
9200 tmp = load_reg(s, rd);
9201 tmp2 = load_reg(s, rm);
9202 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9203 tcg_temp_free_i32(tmp2);
396e467c 9204 store_reg(s, rd, tmp);
99c475ab
FB
9205 break;
9206 case 1: /* cmp */
396e467c
FN
9207 tmp = load_reg(s, rd);
9208 tmp2 = load_reg(s, rm);
72485ec4 9209 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9210 tcg_temp_free_i32(tmp2);
9211 tcg_temp_free_i32(tmp);
99c475ab
FB
9212 break;
9213 case 2: /* mov/cpy */
396e467c
FN
9214 tmp = load_reg(s, rm);
9215 store_reg(s, rd, tmp);
99c475ab
FB
9216 break;
9217 case 3:/* branch [and link] exchange thumb register */
b0109805 9218 tmp = load_reg(s, rm);
99c475ab 9219 if (insn & (1 << 7)) {
be5e7a76 9220 ARCH(5);
99c475ab 9221 val = (uint32_t)s->pc | 1;
7d1b0095 9222 tmp2 = tcg_temp_new_i32();
b0109805
PB
9223 tcg_gen_movi_i32(tmp2, val);
9224 store_reg(s, 14, tmp2);
99c475ab 9225 }
be5e7a76 9226 /* already thumb, no need to check */
d9ba4830 9227 gen_bx(s, tmp);
99c475ab
FB
9228 break;
9229 }
9230 break;
9231 }
9232
9233 /* data processing register */
9234 rd = insn & 7;
9235 rm = (insn >> 3) & 7;
9236 op = (insn >> 6) & 0xf;
9237 if (op == 2 || op == 3 || op == 4 || op == 7) {
9238 /* the shift/rotate ops want the operands backwards */
9239 val = rm;
9240 rm = rd;
9241 rd = val;
9242 val = 1;
9243 } else {
9244 val = 0;
9245 }
9246
396e467c 9247 if (op == 9) { /* neg */
7d1b0095 9248 tmp = tcg_temp_new_i32();
396e467c
FN
9249 tcg_gen_movi_i32(tmp, 0);
9250 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9251 tmp = load_reg(s, rd);
9252 } else {
9253 TCGV_UNUSED(tmp);
9254 }
99c475ab 9255
396e467c 9256 tmp2 = load_reg(s, rm);
5899f386 9257 switch (op) {
99c475ab 9258 case 0x0: /* and */
396e467c 9259 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9260 if (!s->condexec_mask)
396e467c 9261 gen_logic_CC(tmp);
99c475ab
FB
9262 break;
9263 case 0x1: /* eor */
396e467c 9264 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9265 if (!s->condexec_mask)
396e467c 9266 gen_logic_CC(tmp);
99c475ab
FB
9267 break;
9268 case 0x2: /* lsl */
9ee6e8bb 9269 if (s->condexec_mask) {
365af80e 9270 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9271 } else {
9ef39277 9272 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9273 gen_logic_CC(tmp2);
9ee6e8bb 9274 }
99c475ab
FB
9275 break;
9276 case 0x3: /* lsr */
9ee6e8bb 9277 if (s->condexec_mask) {
365af80e 9278 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9279 } else {
9ef39277 9280 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9281 gen_logic_CC(tmp2);
9ee6e8bb 9282 }
99c475ab
FB
9283 break;
9284 case 0x4: /* asr */
9ee6e8bb 9285 if (s->condexec_mask) {
365af80e 9286 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9287 } else {
9ef39277 9288 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9289 gen_logic_CC(tmp2);
9ee6e8bb 9290 }
99c475ab
FB
9291 break;
9292 case 0x5: /* adc */
49b4c31e 9293 if (s->condexec_mask) {
396e467c 9294 gen_adc(tmp, tmp2);
49b4c31e
RH
9295 } else {
9296 gen_adc_CC(tmp, tmp, tmp2);
9297 }
99c475ab
FB
9298 break;
9299 case 0x6: /* sbc */
2de68a49 9300 if (s->condexec_mask) {
396e467c 9301 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9302 } else {
9303 gen_sbc_CC(tmp, tmp, tmp2);
9304 }
99c475ab
FB
9305 break;
9306 case 0x7: /* ror */
9ee6e8bb 9307 if (s->condexec_mask) {
f669df27
AJ
9308 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9309 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9310 } else {
9ef39277 9311 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9312 gen_logic_CC(tmp2);
9ee6e8bb 9313 }
99c475ab
FB
9314 break;
9315 case 0x8: /* tst */
396e467c
FN
9316 tcg_gen_and_i32(tmp, tmp, tmp2);
9317 gen_logic_CC(tmp);
99c475ab 9318 rd = 16;
5899f386 9319 break;
99c475ab 9320 case 0x9: /* neg */
9ee6e8bb 9321 if (s->condexec_mask)
396e467c 9322 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9323 else
72485ec4 9324 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9325 break;
9326 case 0xa: /* cmp */
72485ec4 9327 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9328 rd = 16;
9329 break;
9330 case 0xb: /* cmn */
72485ec4 9331 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9332 rd = 16;
9333 break;
9334 case 0xc: /* orr */
396e467c 9335 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9336 if (!s->condexec_mask)
396e467c 9337 gen_logic_CC(tmp);
99c475ab
FB
9338 break;
9339 case 0xd: /* mul */
7b2919a0 9340 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9341 if (!s->condexec_mask)
396e467c 9342 gen_logic_CC(tmp);
99c475ab
FB
9343 break;
9344 case 0xe: /* bic */
f669df27 9345 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9346 if (!s->condexec_mask)
396e467c 9347 gen_logic_CC(tmp);
99c475ab
FB
9348 break;
9349 case 0xf: /* mvn */
396e467c 9350 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9351 if (!s->condexec_mask)
396e467c 9352 gen_logic_CC(tmp2);
99c475ab 9353 val = 1;
5899f386 9354 rm = rd;
99c475ab
FB
9355 break;
9356 }
9357 if (rd != 16) {
396e467c
FN
9358 if (val) {
9359 store_reg(s, rm, tmp2);
9360 if (op != 0xf)
7d1b0095 9361 tcg_temp_free_i32(tmp);
396e467c
FN
9362 } else {
9363 store_reg(s, rd, tmp);
7d1b0095 9364 tcg_temp_free_i32(tmp2);
396e467c
FN
9365 }
9366 } else {
7d1b0095
PM
9367 tcg_temp_free_i32(tmp);
9368 tcg_temp_free_i32(tmp2);
99c475ab
FB
9369 }
9370 break;
9371
9372 case 5:
9373 /* load/store register offset. */
9374 rd = insn & 7;
9375 rn = (insn >> 3) & 7;
9376 rm = (insn >> 6) & 7;
9377 op = (insn >> 9) & 7;
b0109805 9378 addr = load_reg(s, rn);
b26eefb6 9379 tmp = load_reg(s, rm);
b0109805 9380 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9381 tcg_temp_free_i32(tmp);
99c475ab
FB
9382
9383 if (op < 3) /* store */
b0109805 9384 tmp = load_reg(s, rd);
99c475ab
FB
9385
9386 switch (op) {
9387 case 0: /* str */
b0109805 9388 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9389 break;
9390 case 1: /* strh */
b0109805 9391 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9392 break;
9393 case 2: /* strb */
b0109805 9394 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9395 break;
9396 case 3: /* ldrsb */
b0109805 9397 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
9398 break;
9399 case 4: /* ldr */
b0109805 9400 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9401 break;
9402 case 5: /* ldrh */
b0109805 9403 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
9404 break;
9405 case 6: /* ldrb */
b0109805 9406 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
9407 break;
9408 case 7: /* ldrsh */
b0109805 9409 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
9410 break;
9411 }
9412 if (op >= 3) /* load */
b0109805 9413 store_reg(s, rd, tmp);
7d1b0095 9414 tcg_temp_free_i32(addr);
99c475ab
FB
9415 break;
9416
9417 case 6:
9418 /* load/store word immediate offset */
9419 rd = insn & 7;
9420 rn = (insn >> 3) & 7;
b0109805 9421 addr = load_reg(s, rn);
99c475ab 9422 val = (insn >> 4) & 0x7c;
b0109805 9423 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9424
9425 if (insn & (1 << 11)) {
9426 /* load */
b0109805
PB
9427 tmp = gen_ld32(addr, IS_USER(s));
9428 store_reg(s, rd, tmp);
99c475ab
FB
9429 } else {
9430 /* store */
b0109805
PB
9431 tmp = load_reg(s, rd);
9432 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9433 }
7d1b0095 9434 tcg_temp_free_i32(addr);
99c475ab
FB
9435 break;
9436
9437 case 7:
9438 /* load/store byte immediate offset */
9439 rd = insn & 7;
9440 rn = (insn >> 3) & 7;
b0109805 9441 addr = load_reg(s, rn);
99c475ab 9442 val = (insn >> 6) & 0x1f;
b0109805 9443 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9444
9445 if (insn & (1 << 11)) {
9446 /* load */
b0109805
PB
9447 tmp = gen_ld8u(addr, IS_USER(s));
9448 store_reg(s, rd, tmp);
99c475ab
FB
9449 } else {
9450 /* store */
b0109805
PB
9451 tmp = load_reg(s, rd);
9452 gen_st8(tmp, addr, IS_USER(s));
99c475ab 9453 }
7d1b0095 9454 tcg_temp_free_i32(addr);
99c475ab
FB
9455 break;
9456
9457 case 8:
9458 /* load/store halfword immediate offset */
9459 rd = insn & 7;
9460 rn = (insn >> 3) & 7;
b0109805 9461 addr = load_reg(s, rn);
99c475ab 9462 val = (insn >> 5) & 0x3e;
b0109805 9463 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9464
9465 if (insn & (1 << 11)) {
9466 /* load */
b0109805
PB
9467 tmp = gen_ld16u(addr, IS_USER(s));
9468 store_reg(s, rd, tmp);
99c475ab
FB
9469 } else {
9470 /* store */
b0109805
PB
9471 tmp = load_reg(s, rd);
9472 gen_st16(tmp, addr, IS_USER(s));
99c475ab 9473 }
7d1b0095 9474 tcg_temp_free_i32(addr);
99c475ab
FB
9475 break;
9476
9477 case 9:
9478 /* load/store from stack */
9479 rd = (insn >> 8) & 7;
b0109805 9480 addr = load_reg(s, 13);
99c475ab 9481 val = (insn & 0xff) * 4;
b0109805 9482 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9483
9484 if (insn & (1 << 11)) {
9485 /* load */
b0109805
PB
9486 tmp = gen_ld32(addr, IS_USER(s));
9487 store_reg(s, rd, tmp);
99c475ab
FB
9488 } else {
9489 /* store */
b0109805
PB
9490 tmp = load_reg(s, rd);
9491 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9492 }
7d1b0095 9493 tcg_temp_free_i32(addr);
99c475ab
FB
9494 break;
9495
9496 case 10:
9497 /* add to high reg */
9498 rd = (insn >> 8) & 7;
5899f386
FB
9499 if (insn & (1 << 11)) {
9500 /* SP */
5e3f878a 9501 tmp = load_reg(s, 13);
5899f386
FB
9502 } else {
9503 /* PC. bit 1 is ignored. */
7d1b0095 9504 tmp = tcg_temp_new_i32();
5e3f878a 9505 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9506 }
99c475ab 9507 val = (insn & 0xff) * 4;
5e3f878a
PB
9508 tcg_gen_addi_i32(tmp, tmp, val);
9509 store_reg(s, rd, tmp);
99c475ab
FB
9510 break;
9511
9512 case 11:
9513 /* misc */
9514 op = (insn >> 8) & 0xf;
9515 switch (op) {
9516 case 0:
9517 /* adjust stack pointer */
b26eefb6 9518 tmp = load_reg(s, 13);
99c475ab
FB
9519 val = (insn & 0x7f) * 4;
9520 if (insn & (1 << 7))
6a0d8a1d 9521 val = -(int32_t)val;
b26eefb6
PB
9522 tcg_gen_addi_i32(tmp, tmp, val);
9523 store_reg(s, 13, tmp);
99c475ab
FB
9524 break;
9525
9ee6e8bb
PB
9526 case 2: /* sign/zero extend. */
9527 ARCH(6);
9528 rd = insn & 7;
9529 rm = (insn >> 3) & 7;
b0109805 9530 tmp = load_reg(s, rm);
9ee6e8bb 9531 switch ((insn >> 6) & 3) {
b0109805
PB
9532 case 0: gen_sxth(tmp); break;
9533 case 1: gen_sxtb(tmp); break;
9534 case 2: gen_uxth(tmp); break;
9535 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9536 }
b0109805 9537 store_reg(s, rd, tmp);
9ee6e8bb 9538 break;
99c475ab
FB
9539 case 4: case 5: case 0xc: case 0xd:
9540 /* push/pop */
b0109805 9541 addr = load_reg(s, 13);
5899f386
FB
9542 if (insn & (1 << 8))
9543 offset = 4;
99c475ab 9544 else
5899f386
FB
9545 offset = 0;
9546 for (i = 0; i < 8; i++) {
9547 if (insn & (1 << i))
9548 offset += 4;
9549 }
9550 if ((insn & (1 << 11)) == 0) {
b0109805 9551 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9552 }
99c475ab
FB
9553 for (i = 0; i < 8; i++) {
9554 if (insn & (1 << i)) {
9555 if (insn & (1 << 11)) {
9556 /* pop */
b0109805
PB
9557 tmp = gen_ld32(addr, IS_USER(s));
9558 store_reg(s, i, tmp);
99c475ab
FB
9559 } else {
9560 /* push */
b0109805
PB
9561 tmp = load_reg(s, i);
9562 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9563 }
5899f386 9564 /* advance to the next address. */
b0109805 9565 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9566 }
9567 }
a50f5b91 9568 TCGV_UNUSED(tmp);
99c475ab
FB
9569 if (insn & (1 << 8)) {
9570 if (insn & (1 << 11)) {
9571 /* pop pc */
b0109805 9572 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
9573 /* don't set the pc until the rest of the instruction
9574 has completed */
9575 } else {
9576 /* push lr */
b0109805
PB
9577 tmp = load_reg(s, 14);
9578 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9579 }
b0109805 9580 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9581 }
5899f386 9582 if ((insn & (1 << 11)) == 0) {
b0109805 9583 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9584 }
99c475ab 9585 /* write back the new stack pointer */
b0109805 9586 store_reg(s, 13, addr);
99c475ab 9587 /* set the new PC value */
be5e7a76
DES
9588 if ((insn & 0x0900) == 0x0900) {
9589 store_reg_from_load(env, s, 15, tmp);
9590 }
99c475ab
FB
9591 break;
9592
9ee6e8bb
PB
9593 case 1: case 3: case 9: case 11: /* czb */
9594 rm = insn & 7;
d9ba4830 9595 tmp = load_reg(s, rm);
9ee6e8bb
PB
9596 s->condlabel = gen_new_label();
9597 s->condjmp = 1;
9598 if (insn & (1 << 11))
cb63669a 9599 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9600 else
cb63669a 9601 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9602 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9603 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
9604 val = (uint32_t)s->pc + 2;
9605 val += offset;
9606 gen_jmp(s, val);
9607 break;
9608
9609 case 15: /* IT, nop-hint. */
9610 if ((insn & 0xf) == 0) {
9611 gen_nop_hint(s, (insn >> 4) & 0xf);
9612 break;
9613 }
9614 /* If Then. */
9615 s->condexec_cond = (insn >> 4) & 0xe;
9616 s->condexec_mask = insn & 0x1f;
9617 /* No actual code generated for this insn, just setup state. */
9618 break;
9619
06c949e6 9620 case 0xe: /* bkpt */
be5e7a76 9621 ARCH(5);
bc4a0de0 9622 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9623 break;
9624
9ee6e8bb
PB
9625 case 0xa: /* rev */
9626 ARCH(6);
9627 rn = (insn >> 3) & 0x7;
9628 rd = insn & 0x7;
b0109805 9629 tmp = load_reg(s, rn);
9ee6e8bb 9630 switch ((insn >> 6) & 3) {
66896cb8 9631 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9632 case 1: gen_rev16(tmp); break;
9633 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9634 default: goto illegal_op;
9635 }
b0109805 9636 store_reg(s, rd, tmp);
9ee6e8bb
PB
9637 break;
9638
d9e028c1
PM
9639 case 6:
9640 switch ((insn >> 5) & 7) {
9641 case 2:
9642 /* setend */
9643 ARCH(6);
10962fd5
PM
9644 if (((insn >> 3) & 1) != s->bswap_code) {
9645 /* Dynamic endianness switching not implemented. */
d9e028c1
PM
9646 goto illegal_op;
9647 }
9ee6e8bb 9648 break;
d9e028c1
PM
9649 case 3:
9650 /* cps */
9651 ARCH(6);
9652 if (IS_USER(s)) {
9653 break;
8984bd2e 9654 }
d9e028c1
PM
9655 if (IS_M(env)) {
9656 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9657 /* FAULTMASK */
9658 if (insn & 1) {
9659 addr = tcg_const_i32(19);
9660 gen_helper_v7m_msr(cpu_env, addr, tmp);
9661 tcg_temp_free_i32(addr);
9662 }
9663 /* PRIMASK */
9664 if (insn & 2) {
9665 addr = tcg_const_i32(16);
9666 gen_helper_v7m_msr(cpu_env, addr, tmp);
9667 tcg_temp_free_i32(addr);
9668 }
9669 tcg_temp_free_i32(tmp);
9670 gen_lookup_tb(s);
9671 } else {
9672 if (insn & (1 << 4)) {
9673 shift = CPSR_A | CPSR_I | CPSR_F;
9674 } else {
9675 shift = 0;
9676 }
9677 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9678 }
d9e028c1
PM
9679 break;
9680 default:
9681 goto undef;
9ee6e8bb
PB
9682 }
9683 break;
9684
99c475ab
FB
9685 default:
9686 goto undef;
9687 }
9688 break;
9689
9690 case 12:
a7d3970d 9691 {
99c475ab 9692 /* load/store multiple */
a7d3970d
PM
9693 TCGv loaded_var;
9694 TCGV_UNUSED(loaded_var);
99c475ab 9695 rn = (insn >> 8) & 0x7;
b0109805 9696 addr = load_reg(s, rn);
99c475ab
FB
9697 for (i = 0; i < 8; i++) {
9698 if (insn & (1 << i)) {
99c475ab
FB
9699 if (insn & (1 << 11)) {
9700 /* load */
b0109805 9701 tmp = gen_ld32(addr, IS_USER(s));
a7d3970d
PM
9702 if (i == rn) {
9703 loaded_var = tmp;
9704 } else {
9705 store_reg(s, i, tmp);
9706 }
99c475ab
FB
9707 } else {
9708 /* store */
b0109805
PB
9709 tmp = load_reg(s, i);
9710 gen_st32(tmp, addr, IS_USER(s));
99c475ab 9711 }
5899f386 9712 /* advance to the next address */
b0109805 9713 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9714 }
9715 }
b0109805 9716 if ((insn & (1 << rn)) == 0) {
a7d3970d 9717 /* base reg not in list: base register writeback */
b0109805
PB
9718 store_reg(s, rn, addr);
9719 } else {
a7d3970d
PM
9720 /* base reg in list: if load, complete it now */
9721 if (insn & (1 << 11)) {
9722 store_reg(s, rn, loaded_var);
9723 }
7d1b0095 9724 tcg_temp_free_i32(addr);
b0109805 9725 }
99c475ab 9726 break;
a7d3970d 9727 }
99c475ab
FB
9728 case 13:
9729 /* conditional branch or swi */
9730 cond = (insn >> 8) & 0xf;
9731 if (cond == 0xe)
9732 goto undef;
9733
9734 if (cond == 0xf) {
9735 /* swi */
422ebf69 9736 gen_set_pc_im(s->pc);
9ee6e8bb 9737 s->is_jmp = DISAS_SWI;
99c475ab
FB
9738 break;
9739 }
9740 /* generate a conditional jump to next instruction */
e50e6a20 9741 s->condlabel = gen_new_label();
d9ba4830 9742 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9743 s->condjmp = 1;
99c475ab
FB
9744
9745 /* jump to the offset */
5899f386 9746 val = (uint32_t)s->pc + 2;
99c475ab 9747 offset = ((int32_t)insn << 24) >> 24;
5899f386 9748 val += offset << 1;
8aaca4c0 9749 gen_jmp(s, val);
99c475ab
FB
9750 break;
9751
9752 case 14:
358bf29e 9753 if (insn & (1 << 11)) {
9ee6e8bb
PB
9754 if (disas_thumb2_insn(env, s, insn))
9755 goto undef32;
358bf29e
PB
9756 break;
9757 }
9ee6e8bb 9758 /* unconditional branch */
99c475ab
FB
9759 val = (uint32_t)s->pc;
9760 offset = ((int32_t)insn << 21) >> 21;
9761 val += (offset << 1) + 2;
8aaca4c0 9762 gen_jmp(s, val);
99c475ab
FB
9763 break;
9764
9765 case 15:
9ee6e8bb 9766 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9767 goto undef32;
9ee6e8bb 9768 break;
99c475ab
FB
9769 }
9770 return;
9ee6e8bb 9771undef32:
bc4a0de0 9772 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9773 return;
9774illegal_op:
99c475ab 9775undef:
bc4a0de0 9776 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9777}
9778
2c0262af
FB
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction.

   env       - CPU state the translation is performed for
   tb        - TranslationBlock being filled in (size/icount written back
               unless search_pc is set)
   search_pc - when true, record per-op PC/condexec/icount metadata so
               restore_state_to_opc() can map host PCs back to guest state */
static inline void gen_intermediate_code_internal(CPUARMState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Seed the disassembly context from the TB flags.  The condexec mask
       is stored pre-shifted left by one (matching the in-CPU encoding of
       the IT-block state). */
    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
    dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
#endif
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();

    tcg_clear_temp_count();

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations: we will be called again with search_pc=1
     * and generate a mapping of the condexec bits for each PC in
     * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
     * this to restore the condexec bits.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond)
      {
        TCGv tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
      }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so know we are not in a
               conditional execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_exception_insn(dc, 0, EXCP_DEBUG);
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record the mapping from TCG op index to guest PC and
               condexec bits, zero-filling any ops emitted since the
               previous instruction boundary. */
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc->pc);
        }

        if (dc->thumb) {
            disas_thumb_insn(env, dc);
            /* Advance the IT-block state machine: shift the mask and
               fold the next condition bit into condexec_cond. */
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }

        if (tcg_check_temp_count()) {
            fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc);
        }

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns ++;
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        /* Emit the not-taken path of a trailing conditional
           branch/trap: fall through to the next instruction. */
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc->pc - pc_start,
                         dc->thumb | (dc->bswap_code << 1));
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        /* Pad the metadata arrays out to the final op count. */
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
10056
/* Translate a basic block at env/tb->pc into TCG ops (no PC-mapping
   metadata; this is the normal translation entry point). */
void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
10061
/* Re-translate a basic block while recording per-op PC/condexec metadata,
   used to restore guest state after an exception mid-TB. */
void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
10066
b5ff1b31
FB
/* Printable names for the CPSR mode field (CPSR bits [3:0] as indexed
   below); "???" marks encodings this table does not name — presumably
   reserved mode values, TODO confirm against the architecture manual. */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 10071
0ecb72a5 10072void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
7fe48483 10073 int flags)
2c0262af
FB
10074{
10075 int i;
b5ff1b31 10076 uint32_t psr;
2c0262af
FB
10077
10078 for(i=0;i<16;i++) {
7fe48483 10079 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10080 if ((i % 4) == 3)
7fe48483 10081 cpu_fprintf(f, "\n");
2c0262af 10082 else
7fe48483 10083 cpu_fprintf(f, " ");
2c0262af 10084 }
b5ff1b31 10085 psr = cpsr_read(env);
687fa640
TS
10086 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10087 psr,
b5ff1b31
FB
10088 psr & (1 << 31) ? 'N' : '-',
10089 psr & (1 << 30) ? 'Z' : '-',
10090 psr & (1 << 29) ? 'C' : '-',
10091 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10092 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10093 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10094
f2617cfc
PM
10095 if (flags & CPU_DUMP_FPU) {
10096 int numvfpregs = 0;
10097 if (arm_feature(env, ARM_FEATURE_VFP)) {
10098 numvfpregs += 16;
10099 }
10100 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10101 numvfpregs += 16;
10102 }
10103 for (i = 0; i < numvfpregs; i++) {
10104 uint64_t v = float64_val(env->vfp.regs[i]);
10105 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10106 i * 2, (uint32_t)v,
10107 i * 2 + 1, (uint32_t)(v >> 32),
10108 i, v);
10109 }
10110 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10111 }
2c0262af 10112}
a6b025d3 10113
/* Restore guest PC (regs[15]) and the IT-block condexec bits from the
   per-op metadata recorded by gen_intermediate_code_internal() when it
   was run with search_pc=1; pc_pos indexes that metadata. */
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
    env->condexec_bits = gen_opc_condexec_bits[pc_pos];
}