/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "qemu/log.h"
#include "qemu/bitops.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define ENABLE_ARCH_4T    arm_feature(env, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_feature(env, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_feature(env, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_feature(env, ARM_FEATURE_V8)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

#include "translate.h"
static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv_i32 cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "exec/gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helper.h"
}

static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

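/* Illustrative sketch (not part of the original file): a typical consumer of
 * load_reg()/store_reg() reads operands into temporaries, operates on them,
 * and writes the result back, e.g.
 *     TCGv_i32 tmp = load_reg(s, rn);
 *     tcg_gen_addi_i32(tmp, tmp, imm);
 *     store_reg(s, rd, tmp);
 * where rn, rd and imm stand for hypothetical values taken from instruction
 * decode.
 */
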
/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)


static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv_i32 var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a. Mark inputs as dead. */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    tcg_temp_free_i32(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_mulu2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();
    TCGv_i64 ret;

    tcg_gen_muls2_i32(lo, hi, a, b);
    tcg_temp_free_i32(a);
    tcg_temp_free_i32(b);

    ret = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(ret, lo, hi);
    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);

    return ret;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(t1);
}

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv_i32 var)
{
    tcg_gen_shri_i32(cpu_CF, var, 31);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_add_i32(t0, t0, cpu_CF);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF.  Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF.  Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

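/* Note on the flag representation used by the helpers above: cpu_NF and
 * cpu_ZF hold the most recent result (N is bit 31 of NF, Z means ZF == 0),
 * cpu_CF holds the carry as 0 or 1, and cpu_VF holds overflow in bit 31.
 * gen_test_cc() below relies on exactly this layout.
 */
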
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
{
    TCGv_i32 c0 = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_neg_i32(tmp, src);
    tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
    tcg_temp_free_i32(c0);
    tcg_temp_free_i32(tmp);
}

static void shifter_out_im(TCGv_i32 var, int shift)
{
    if (shift == 0) {
        tcg_gen_andi_i32(cpu_CF, var, 1);
    } else {
        tcg_gen_shri_i32(cpu_CF, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
        }
    }
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
d9ba4830
PB
680static void gen_test_cc(int cc, int label)
681{
39d5492a 682 TCGv_i32 tmp;
d9ba4830
PB
683 int inv;
684
d9ba4830
PB
685 switch (cc) {
686 case 0: /* eq: Z */
66c374de 687 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
688 break;
689 case 1: /* ne: !Z */
66c374de 690 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
691 break;
692 case 2: /* cs: C */
66c374de 693 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
694 break;
695 case 3: /* cc: !C */
66c374de 696 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
697 break;
698 case 4: /* mi: N */
66c374de 699 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
700 break;
701 case 5: /* pl: !N */
66c374de 702 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
703 break;
704 case 6: /* vs: V */
66c374de 705 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
706 break;
707 case 7: /* vc: !V */
66c374de 708 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
709 break;
710 case 8: /* hi: C && !Z */
711 inv = gen_new_label();
66c374de
AJ
712 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
713 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
714 gen_set_label(inv);
715 break;
716 case 9: /* ls: !C || Z */
66c374de
AJ
717 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
718 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
719 break;
720 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
721 tmp = tcg_temp_new_i32();
722 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 723 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 724 tcg_temp_free_i32(tmp);
d9ba4830
PB
725 break;
726 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
727 tmp = tcg_temp_new_i32();
728 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 729 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 730 tcg_temp_free_i32(tmp);
d9ba4830
PB
731 break;
732 case 12: /* gt: !Z && N == V */
733 inv = gen_new_label();
66c374de
AJ
734 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
735 tmp = tcg_temp_new_i32();
736 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 737 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 738 tcg_temp_free_i32(tmp);
d9ba4830
PB
739 gen_set_label(inv);
740 break;
741 case 13: /* le: Z || N != V */
66c374de
AJ
742 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
743 tmp = tcg_temp_new_i32();
744 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 745 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 746 tcg_temp_free_i32(tmp);
d9ba4830
PB
747 break;
748 default:
749 fprintf(stderr, "Bad condition code 0x%x\n", cc);
750 abort();
751 }
d9ba4830 752}
2c0262af 753
b1d8e52e 754static const uint8_t table_logic_cc[16] = {
2c0262af
FB
755 1, /* and */
756 1, /* xor */
757 0, /* sub */
758 0, /* rsb */
759 0, /* add */
760 0, /* adc */
761 0, /* sbc */
762 0, /* rsc */
763 1, /* andl */
764 1, /* xorl */
765 0, /* cmp */
766 0, /* cmn */
767 1, /* orr */
768 1, /* mov */
769 1, /* bic */
770 1, /* mvn */
771};
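
/* table_logic_cc is indexed by the 4-bit data processing opcode: entries set
 * to 1 are the logical operations (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN),
 * which set N and Z directly from the result, while the arithmetic ops
 * compute C and V through the dedicated flag helpers above.
 */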

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv_i32 tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
                                int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
                                       int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that their arguments are TCGv_i32 rather than TCGv.
 */
#if TARGET_LONG_BITS == 32

#define DO_GEN_LD(OP)                                                    \
static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_##OP(val, addr, index);                                 \
}

#define DO_GEN_ST(OP)                                                    \
static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    tcg_gen_qemu_##OP(val, addr, index);                                 \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_ld64(val, addr, index);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
}

#else

#define DO_GEN_LD(OP)                                                    \
static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    TCGv val64 = tcg_temp_new();                                         \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_qemu_##OP(val64, addr64, index);                             \
    tcg_temp_free(addr64);                                               \
    tcg_gen_trunc_i64_i32(val, val64);                                   \
    tcg_temp_free(val64);                                                \
}

#define DO_GEN_ST(OP)                                                    \
static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
{                                                                        \
    TCGv addr64 = tcg_temp_new();                                        \
    TCGv val64 = tcg_temp_new();                                         \
    tcg_gen_extu_i32_i64(addr64, addr);                                  \
    tcg_gen_extu_i32_i64(val64, val);                                    \
    tcg_gen_qemu_##OP(val64, addr64, index);                             \
    tcg_temp_free(addr64);                                               \
    tcg_temp_free(val64);                                                \
}

static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_ld64(val, addr64, index);
    tcg_temp_free(addr64);
}

static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
{
    TCGv addr64 = tcg_temp_new();
    tcg_gen_extu_i32_i64(addr64, addr);
    tcg_gen_qemu_st64(val, addr64, index);
    tcg_temp_free(addr64);
}

#endif

DO_GEN_LD(ld8s)
DO_GEN_LD(ld8u)
DO_GEN_LD(ld16s)
DO_GEN_LD(ld16u)
DO_GEN_LD(ld32u)
DO_GEN_ST(st8)
DO_GEN_ST(st16)
DO_GEN_ST(st32)

static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}
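
/* Illustrative sketch (not part of the original file): a word load would be
 * emitted through the wrappers above roughly as
 *     TCGv_i32 val = tcg_temp_new_i32();
 *     gen_aa32_ld32u(val, addr, IS_USER(s));
 *     store_reg_from_load(env, s, rd, val);
 * assuming addr already holds the computed virtual address and rd is the
 * destination register from instruction decode.
 */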

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv_i32 var)
{
    int val, rm, shift, shiftop;
    TCGv_i32 offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}

static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv_i32 var)
{
    int val, rm;
    TCGv_i32 offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        tcg_temp_free_i32(offset);
    }
}
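
/* In both offset helpers above, bit 23 of the instruction selects whether the
 * offset is added (U=1) or subtracted (U=0): gen_add_data_offset() decodes the
 * 12-bit immediate or shifted-register forms, and gen_add_datah_offset() the
 * split 8-bit immediate used by halfword/doubleword transfers.
 */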

static TCGv_ptr get_fpstatus_ptr(int neon)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;
    if (neon) {
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
    } else {
        offset = offsetof(CPUARMState, vfp.fp_status);
    }
    tcg_gen_addi_ptr(statusptr, cpu_env, offset);
    return statusptr;
}

#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    TCGv_ptr fpst = get_fpstatus_ptr(0);                              \
    if (dp) {                                                         \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst);    \
    } else {                                                          \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst);    \
    }                                                                 \
    tcg_temp_free_ptr(fpst);                                          \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_F1_mul(int dp)
{
    /* Like gen_vfp_mul() but put result in F1 */
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    if (dp) {
        gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
    } else {
        gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
    }
    tcg_temp_free_ptr(fpst);
}

static inline void gen_vfp_F1_neg(int dp)
{
    /* Like gen_vfp_neg() but put result in F1 */
    if (dp) {
        gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
    } else {
        gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
    }
}

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

#define VFP_GEN_ITOF(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_ITOF(uito)
VFP_GEN_ITOF(sito)
#undef VFP_GEN_ITOF

#define VFP_GEN_FTOI(name) \
static inline void gen_vfp_##name(int dp, int neon) \
{ \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
    } \
    tcg_temp_free_ptr(statusptr); \
}

VFP_GEN_FTOI(toui)
VFP_GEN_FTOI(touiz)
VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
    TCGv_i32 tmp_shift = tcg_const_i32(shift); \
    TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
    if (dp) { \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
    } else { \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
    } \
    tcg_temp_free_i32(tmp_shift); \
    tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
    }
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
    if (dp) {
        gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
    } else {
        gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
    }
}

static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv_i32 neon_load_reg(int reg, int pass)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    tcg_temp_free_i32(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

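/* The VFP/Neon accessors above rely on the single precision registers
 * overlapping the double precision ones: s<2n> and s<2n+1> map to the lower
 * and upper 32-bit halves of d<n>, which is why vfp_reg_offset() uses
 * (reg & 1) to pick CPU_DoubleU.l.upper or .lower.
 */
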
#define ARM_CP_RW_BIT   (1 << 20)

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
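
/* gen_iwmmxt_address() above implements the usual ARM pre/post-indexed
 * addressing for iwMMXt loads and stores: bit 24 selects pre-indexing,
 * bit 21 write-back, and bit 23 whether the scaled 8-bit offset is added
 * or subtracted.
 */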

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv_i32 addr;
    TCGv_i32 tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) {                 /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else {                                    /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = tcg_temp_new_i32();
        if (gen_iwmmxt_address(s, insn, addr)) {
            tcg_temp_free_i32(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) {                  /* WLDRW wCx */
                tmp = tcg_temp_new_i32();
                gen_aa32_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WLDRD */
                        gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else {                            /* WLDRW wRd */
                        tmp = tcg_temp_new_i32();
                        gen_aa32_ld32u(tmp, addr, IS_USER(s));
                    }
                } else {
                    tmp = tcg_temp_new_i32();
                    if (insn & (1 << 22)) {             /* WLDRH */
                        gen_aa32_ld16u(tmp, addr, IS_USER(s));
                    } else {                            /* WLDRB */
                        gen_aa32_ld8u(tmp, addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    tcg_temp_free_i32(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) {                  /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_aa32_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = tcg_temp_new_i32();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) {             /* WSTRD */
                        gen_aa32_st64(cpu_M0, addr, IS_USER(s));
                    } else {                            /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) {             /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st16(tmp, addr, IS_USER(s));
                    } else {                            /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_aa32_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
            tcg_temp_free_i32(tmp);
        }
        tcg_temp_free_i32(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000:                                         /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011:                                         /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            tcg_temp_free_i32(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100:                                         /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111:                                         /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300:                                         /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200:                                         /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10:                             /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e:     /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c:     /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512:     /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:     /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:     /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:     /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:     /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:     /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:     /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED_I32(tmp2);
            TCGV_UNUSED_I32(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:     /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:     /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:     /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
1894 break;
1895 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 1896 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1897 return 1;
da6b5335 1898 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1899 tmp2 = tcg_temp_new_i32();
da6b5335 1900 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
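/* TANDC: AND the per-element SIMD flag fields of wCASF together and set
 * NZCV from the combined result (TORC below does the same with OR). */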
1901 switch ((insn >> 22) & 3) {
1902 case 0:
1903 for (i = 0; i < 7; i ++) {
da6b5335
FN
1904 tcg_gen_shli_i32(tmp2, tmp2, 4);
1905 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1906 }
1907 break;
1908 case 1:
1909 for (i = 0; i < 3; i ++) {
da6b5335
FN
1910 tcg_gen_shli_i32(tmp2, tmp2, 8);
1911 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
1912 }
1913 break;
1914 case 2:
da6b5335
FN
1915 tcg_gen_shli_i32(tmp2, tmp2, 16);
1916 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 1917 break;
18c9b560 1918 }
da6b5335 1919 gen_set_nzcv(tmp);
7d1b0095
PM
1920 tcg_temp_free_i32(tmp2);
1921 tcg_temp_free_i32(tmp);
18c9b560
AZ
1922 break;
1923 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1924 wrd = (insn >> 12) & 0xf;
1925 rd0 = (insn >> 16) & 0xf;
1926 gen_op_iwmmxt_movq_M0_wRn(rd0);
1927 switch ((insn >> 22) & 3) {
1928 case 0:
e677137d 1929 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1930 break;
1931 case 1:
e677137d 1932 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1933 break;
1934 case 2:
e677137d 1935 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1936 break;
1937 case 3:
1938 return 1;
1939 }
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 break;
1943 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 1944 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1945 return 1;
da6b5335 1946 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 1947 tmp2 = tcg_temp_new_i32();
da6b5335 1948 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
1949 switch ((insn >> 22) & 3) {
1950 case 0:
1951 for (i = 0; i < 7; i ++) {
da6b5335
FN
1952 tcg_gen_shli_i32(tmp2, tmp2, 4);
1953 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1954 }
1955 break;
1956 case 1:
1957 for (i = 0; i < 3; i ++) {
da6b5335
FN
1958 tcg_gen_shli_i32(tmp2, tmp2, 8);
1959 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
1960 }
1961 break;
1962 case 2:
da6b5335
FN
1963 tcg_gen_shli_i32(tmp2, tmp2, 16);
1964 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 1965 break;
18c9b560 1966 }
da6b5335 1967 gen_set_nzcv(tmp);
7d1b0095
PM
1968 tcg_temp_free_i32(tmp2);
1969 tcg_temp_free_i32(tmp);
18c9b560
AZ
1970 break;
1971 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1972 rd = (insn >> 12) & 0xf;
1973 rd0 = (insn >> 16) & 0xf;
da6b5335 1974 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1975 return 1;
1976 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 1977 tmp = tcg_temp_new_i32();
18c9b560
AZ
1978 switch ((insn >> 22) & 3) {
1979 case 0:
da6b5335 1980 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
1981 break;
1982 case 1:
da6b5335 1983 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
1984 break;
1985 case 2:
da6b5335 1986 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 1987 break;
18c9b560 1988 }
da6b5335 1989 store_reg(s, rd, tmp);
18c9b560
AZ
1990 break;
1991 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1992 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1993 wrd = (insn >> 12) & 0xf;
1994 rd0 = (insn >> 16) & 0xf;
1995 rd1 = (insn >> 0) & 0xf;
1996 gen_op_iwmmxt_movq_M0_wRn(rd0);
1997 switch ((insn >> 22) & 3) {
1998 case 0:
1999 if (insn & (1 << 21))
2000 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2001 else
2002 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2003 break;
2004 case 1:
2005 if (insn & (1 << 21))
2006 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2007 else
2008 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2009 break;
2010 case 2:
2011 if (insn & (1 << 21))
2012 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2013 else
2014 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2015 break;
2016 case 3:
2017 return 1;
2018 }
2019 gen_op_iwmmxt_movq_wRn_M0(wrd);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2022 break;
2023 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2024 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
2028 switch ((insn >> 22) & 3) {
2029 case 0:
2030 if (insn & (1 << 21))
2031 gen_op_iwmmxt_unpacklsb_M0();
2032 else
2033 gen_op_iwmmxt_unpacklub_M0();
2034 break;
2035 case 1:
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_unpacklsw_M0();
2038 else
2039 gen_op_iwmmxt_unpackluw_M0();
2040 break;
2041 case 2:
2042 if (insn & (1 << 21))
2043 gen_op_iwmmxt_unpacklsl_M0();
2044 else
2045 gen_op_iwmmxt_unpacklul_M0();
2046 break;
2047 case 3:
2048 return 1;
2049 }
2050 gen_op_iwmmxt_movq_wRn_M0(wrd);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2053 break;
2054 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2055 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 switch ((insn >> 22) & 3) {
2060 case 0:
2061 if (insn & (1 << 21))
2062 gen_op_iwmmxt_unpackhsb_M0();
2063 else
2064 gen_op_iwmmxt_unpackhub_M0();
2065 break;
2066 case 1:
2067 if (insn & (1 << 21))
2068 gen_op_iwmmxt_unpackhsw_M0();
2069 else
2070 gen_op_iwmmxt_unpackhuw_M0();
2071 break;
2072 case 2:
2073 if (insn & (1 << 21))
2074 gen_op_iwmmxt_unpackhsl_M0();
2075 else
2076 gen_op_iwmmxt_unpackhul_M0();
2077 break;
2078 case 3:
2079 return 1;
2080 }
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2086 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2087 if (((insn >> 22) & 3) == 0)
2088 return 1;
18c9b560
AZ
2089 wrd = (insn >> 12) & 0xf;
2090 rd0 = (insn >> 16) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2092 tmp = tcg_temp_new_i32();
da6b5335 2093 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2094 tcg_temp_free_i32(tmp);
18c9b560 2095 return 1;
da6b5335 2096 }
18c9b560 2097 switch ((insn >> 22) & 3) {
18c9b560 2098 case 1:
477955bd 2099 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2100 break;
2101 case 2:
477955bd 2102 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2103 break;
2104 case 3:
477955bd 2105 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2106 break;
2107 }
7d1b0095 2108 tcg_temp_free_i32(tmp);
18c9b560
AZ
2109 gen_op_iwmmxt_movq_wRn_M0(wrd);
2110 gen_op_iwmmxt_set_mup();
2111 gen_op_iwmmxt_set_cup();
2112 break;
2113 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2114 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2115 if (((insn >> 22) & 3) == 0)
2116 return 1;
18c9b560
AZ
2117 wrd = (insn >> 12) & 0xf;
2118 rd0 = (insn >> 16) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2120 tmp = tcg_temp_new_i32();
da6b5335 2121 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2122 tcg_temp_free_i32(tmp);
18c9b560 2123 return 1;
da6b5335 2124 }
18c9b560 2125 switch ((insn >> 22) & 3) {
18c9b560 2126 case 1:
477955bd 2127 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2128 break;
2129 case 2:
477955bd 2130 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2131 break;
2132 case 3:
477955bd 2133 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2134 break;
2135 }
7d1b0095 2136 tcg_temp_free_i32(tmp);
18c9b560
AZ
2137 gen_op_iwmmxt_movq_wRn_M0(wrd);
2138 gen_op_iwmmxt_set_mup();
2139 gen_op_iwmmxt_set_cup();
2140 break;
2141 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2142 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2143 if (((insn >> 22) & 3) == 0)
2144 return 1;
18c9b560
AZ
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2148 tmp = tcg_temp_new_i32();
da6b5335 2149 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2150 tcg_temp_free_i32(tmp);
18c9b560 2151 return 1;
da6b5335 2152 }
18c9b560 2153 switch ((insn >> 22) & 3) {
18c9b560 2154 case 1:
477955bd 2155 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2156 break;
2157 case 2:
477955bd 2158 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2159 break;
2160 case 3:
477955bd 2161 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2162 break;
2163 }
7d1b0095 2164 tcg_temp_free_i32(tmp);
18c9b560
AZ
2165 gen_op_iwmmxt_movq_wRn_M0(wrd);
2166 gen_op_iwmmxt_set_mup();
2167 gen_op_iwmmxt_set_cup();
2168 break;
2169 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2170 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2171 if (((insn >> 22) & 3) == 0)
2172 return 1;
18c9b560
AZ
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2176 tmp = tcg_temp_new_i32();
18c9b560 2177 switch ((insn >> 22) & 3) {
18c9b560 2178 case 1:
da6b5335 2179 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2180 tcg_temp_free_i32(tmp);
18c9b560 2181 return 1;
da6b5335 2182 }
477955bd 2183 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2184 break;
2185 case 2:
da6b5335 2186 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2187 tcg_temp_free_i32(tmp);
18c9b560 2188 return 1;
da6b5335 2189 }
477955bd 2190 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2191 break;
2192 case 3:
da6b5335 2193 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2194 tcg_temp_free_i32(tmp);
18c9b560 2195 return 1;
da6b5335 2196 }
477955bd 2197 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2198 break;
2199 }
7d1b0095 2200 tcg_temp_free_i32(tmp);
18c9b560
AZ
2201 gen_op_iwmmxt_movq_wRn_M0(wrd);
2202 gen_op_iwmmxt_set_mup();
2203 gen_op_iwmmxt_set_cup();
2204 break;
2205 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2206 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2207 wrd = (insn >> 12) & 0xf;
2208 rd0 = (insn >> 16) & 0xf;
2209 rd1 = (insn >> 0) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
2211 switch ((insn >> 22) & 3) {
2212 case 0:
2213 if (insn & (1 << 21))
2214 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2215 else
2216 gen_op_iwmmxt_minub_M0_wRn(rd1);
2217 break;
2218 case 1:
2219 if (insn & (1 << 21))
2220 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2223 break;
2224 case 2:
2225 if (insn & (1 << 21))
2226 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2227 else
2228 gen_op_iwmmxt_minul_M0_wRn(rd1);
2229 break;
2230 case 3:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 break;
2236 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2237 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2238 wrd = (insn >> 12) & 0xf;
2239 rd0 = (insn >> 16) & 0xf;
2240 rd1 = (insn >> 0) & 0xf;
2241 gen_op_iwmmxt_movq_M0_wRn(rd0);
2242 switch ((insn >> 22) & 3) {
2243 case 0:
2244 if (insn & (1 << 21))
2245 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2246 else
2247 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2248 break;
2249 case 1:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2252 else
2253 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2254 break;
2255 case 2:
2256 if (insn & (1 << 21))
2257 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2258 else
2259 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2260 break;
2261 case 3:
2262 return 1;
2263 }
2264 gen_op_iwmmxt_movq_wRn_M0(wrd);
2265 gen_op_iwmmxt_set_mup();
2266 break;
2267 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2268 case 0x402: case 0x502: case 0x602: case 0x702:
2269 wrd = (insn >> 12) & 0xf;
2270 rd0 = (insn >> 16) & 0xf;
2271 rd1 = (insn >> 0) & 0xf;
2272 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2273 tmp = tcg_const_i32((insn >> 20) & 3);
2274 iwmmxt_load_reg(cpu_V1, rd1);
2275 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2276 tcg_temp_free_i32(tmp);
18c9b560
AZ
2277 gen_op_iwmmxt_movq_wRn_M0(wrd);
2278 gen_op_iwmmxt_set_mup();
2279 break;
2280 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2281 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2282 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2283 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2284 wrd = (insn >> 12) & 0xf;
2285 rd0 = (insn >> 16) & 0xf;
2286 rd1 = (insn >> 0) & 0xf;
2287 gen_op_iwmmxt_movq_M0_wRn(rd0);
2288 switch ((insn >> 20) & 0xf) {
2289 case 0x0:
2290 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2291 break;
2292 case 0x1:
2293 gen_op_iwmmxt_subub_M0_wRn(rd1);
2294 break;
2295 case 0x3:
2296 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2297 break;
2298 case 0x4:
2299 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2300 break;
2301 case 0x5:
2302 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2303 break;
2304 case 0x7:
2305 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2306 break;
2307 case 0x8:
2308 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2309 break;
2310 case 0x9:
2311 gen_op_iwmmxt_subul_M0_wRn(rd1);
2312 break;
2313 case 0xb:
2314 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2315 break;
2316 default:
2317 return 1;
2318 }
2319 gen_op_iwmmxt_movq_wRn_M0(wrd);
2320 gen_op_iwmmxt_set_mup();
2321 gen_op_iwmmxt_set_cup();
2322 break;
2323 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2324 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2325 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2326 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2327 wrd = (insn >> 12) & 0xf;
2328 rd0 = (insn >> 16) & 0xf;
2329 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2330 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2331 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2332 tcg_temp_free_i32(tmp);
18c9b560
AZ
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 gen_op_iwmmxt_set_cup();
2336 break;
2337 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2338 case 0x418: case 0x518: case 0x618: case 0x718:
2339 case 0x818: case 0x918: case 0xa18: case 0xb18:
2340 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2341 wrd = (insn >> 12) & 0xf;
2342 rd0 = (insn >> 16) & 0xf;
2343 rd1 = (insn >> 0) & 0xf;
2344 gen_op_iwmmxt_movq_M0_wRn(rd0);
2345 switch ((insn >> 20) & 0xf) {
2346 case 0x0:
2347 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2348 break;
2349 case 0x1:
2350 gen_op_iwmmxt_addub_M0_wRn(rd1);
2351 break;
2352 case 0x3:
2353 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2354 break;
2355 case 0x4:
2356 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2357 break;
2358 case 0x5:
2359 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2360 break;
2361 case 0x7:
2362 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2363 break;
2364 case 0x8:
2365 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2366 break;
2367 case 0x9:
2368 gen_op_iwmmxt_addul_M0_wRn(rd1);
2369 break;
2370 case 0xb:
2371 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2372 break;
2373 default:
2374 return 1;
2375 }
2376 gen_op_iwmmxt_movq_wRn_M0(wrd);
2377 gen_op_iwmmxt_set_mup();
2378 gen_op_iwmmxt_set_cup();
2379 break;
2380 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2381 case 0x408: case 0x508: case 0x608: case 0x708:
2382 case 0x808: case 0x908: case 0xa08: case 0xb08:
2383 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2384 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2385 return 1;
18c9b560
AZ
2386 wrd = (insn >> 12) & 0xf;
2387 rd0 = (insn >> 16) & 0xf;
2388 rd1 = (insn >> 0) & 0xf;
2389 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2390 switch ((insn >> 22) & 3) {
18c9b560
AZ
2391 case 1:
2392 if (insn & (1 << 21))
2393 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2394 else
2395 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2396 break;
2397 case 2:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_packul_M0_wRn(rd1);
2402 break;
2403 case 3:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2408 break;
2409 }
2410 gen_op_iwmmxt_movq_wRn_M0(wrd);
2411 gen_op_iwmmxt_set_mup();
2412 gen_op_iwmmxt_set_cup();
2413 break;
2414 case 0x201: case 0x203: case 0x205: case 0x207:
2415 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2416 case 0x211: case 0x213: case 0x215: case 0x217:
2417 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2418 wrd = (insn >> 5) & 0xf;
2419 rd0 = (insn >> 12) & 0xf;
2420 rd1 = (insn >> 0) & 0xf;
2421 if (rd0 == 0xf || rd1 == 0xf)
2422 return 1;
2423 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2424 tmp = load_reg(s, rd0);
2425 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2426 switch ((insn >> 16) & 0xf) {
2427 case 0x0: /* TMIA */
da6b5335 2428 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2429 break;
2430 case 0x8: /* TMIAPH */
da6b5335 2431 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2432 break;
2433 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2434 if (insn & (1 << 16))
da6b5335 2435 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2436 if (insn & (1 << 17))
da6b5335
FN
2437 tcg_gen_shri_i32(tmp2, tmp2, 16);
2438 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2439 break;
2440 default:
7d1b0095
PM
2441 tcg_temp_free_i32(tmp2);
2442 tcg_temp_free_i32(tmp);
18c9b560
AZ
2443 return 1;
2444 }
7d1b0095
PM
2445 tcg_temp_free_i32(tmp2);
2446 tcg_temp_free_i32(tmp);
18c9b560
AZ
2447 gen_op_iwmmxt_movq_wRn_M0(wrd);
2448 gen_op_iwmmxt_set_mup();
2449 break;
2450 default:
2451 return 1;
2452 }
2453
2454 return 0;
2455}
2456
a1c7273b 2457/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2458 (i.e. an undefined instruction). */
0ecb72a5 2459static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
18c9b560
AZ
2460{
2461 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2462 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2463
2464 if ((insn & 0x0ff00f10) == 0x0e200010) {
2465 /* Multiply with Internal Accumulate Format */
2466 rd0 = (insn >> 12) & 0xf;
2467 rd1 = insn & 0xf;
2468 acc = (insn >> 5) & 7;
2469
2470 if (acc != 0)
2471 return 1;
2472
3a554c0f
FN
2473 tmp = load_reg(s, rd0);
2474 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2475 switch ((insn >> 16) & 0xf) {
2476 case 0x0: /* MIA */
3a554c0f 2477 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2478 break;
2479 case 0x8: /* MIAPH */
3a554c0f 2480 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2481 break;
2482 case 0xc: /* MIABB */
2483 case 0xd: /* MIABT */
2484 case 0xe: /* MIATB */
2485 case 0xf: /* MIATT */
18c9b560 2486 if (insn & (1 << 16))
3a554c0f 2487 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2488 if (insn & (1 << 17))
3a554c0f
FN
2489 tcg_gen_shri_i32(tmp2, tmp2, 16);
2490 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2491 break;
2492 default:
2493 return 1;
2494 }
7d1b0095
PM
2495 tcg_temp_free_i32(tmp2);
2496 tcg_temp_free_i32(tmp);
18c9b560
AZ
2497
2498 gen_op_iwmmxt_movq_wRn_M0(acc);
2499 return 0;
2500 }
2501
2502 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2503 /* Internal Accumulator Access Format */
2504 rdhi = (insn >> 16) & 0xf;
2505 rdlo = (insn >> 12) & 0xf;
2506 acc = insn & 7;
2507
2508 if (acc != 0)
2509 return 1;
2510
2511 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2512 iwmmxt_load_reg(cpu_V0, acc);
2513 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2514 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2515 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
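/* acc0 is a 40-bit accumulator: only bits [39:32] are significant in rdhi. */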
2516 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2517 } else { /* MAR */
3a554c0f
FN
2518 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2519 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2520 }
2521 return 0;
2522 }
2523
2524 return 1;
2525}
2526
9ee6e8bb
PB
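/* Decode the Sd/Sn/Sm and Dd/Dn/Dm register fields: each is a 4-bit field in
 * the insn plus one extra bit elsewhere.  Pre-VFP3 cores only have D0-D15,
 * so the extra double-precision bit must be zero there. */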
2527#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2528#define VFP_SREG(insn, bigbit, smallbit) \
2529 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2530#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2531 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2532 reg = (((insn) >> (bigbit)) & 0x0f) \
2533 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2534 } else { \
2535 if (insn & (1 << (smallbit))) \
2536 return 1; \
2537 reg = ((insn) >> (bigbit)) & 0x0f; \
2538 }} while (0)
2539
2540#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2541#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2542#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2543#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2544#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2545#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2546
4373f3ce 2547/* Move between integer and VFP cores. */
39d5492a 2548static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2549{
39d5492a 2550 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2551 tcg_gen_mov_i32(tmp, cpu_F0s);
2552 return tmp;
2553}
2554
39d5492a 2555static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2556{
2557 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2558 tcg_temp_free_i32(tmp);
4373f3ce
PB
2559}
2560
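/* Replicate the byte selected by "shift" into all four byte lanes of var. */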
39d5492a 2561static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2562{
39d5492a 2563 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2564 if (shift)
2565 tcg_gen_shri_i32(var, var, shift);
86831435 2566 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2567 tcg_gen_shli_i32(tmp, var, 8);
2568 tcg_gen_or_i32(var, var, tmp);
2569 tcg_gen_shli_i32(tmp, var, 16);
2570 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2571 tcg_temp_free_i32(tmp);
ad69471c
PB
2572}
2573
39d5492a 2574static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2575{
39d5492a 2576 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2577 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2578 tcg_gen_shli_i32(tmp, var, 16);
2579 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2580 tcg_temp_free_i32(tmp);
ad69471c
PB
2581}
2582
39d5492a 2583static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2584{
39d5492a 2585 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2586 tcg_gen_andi_i32(var, var, 0xffff0000);
2587 tcg_gen_shri_i32(tmp, var, 16);
2588 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2589 tcg_temp_free_i32(tmp);
ad69471c
PB
2590}
2591
39d5492a 2592static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2593{
2594 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2595 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2596 switch (size) {
2597 case 0:
08307563 2598 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8e18cde3
PM
2599 gen_neon_dup_u8(tmp, 0);
2600 break;
2601 case 1:
08307563 2602 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8e18cde3
PM
2603 gen_neon_dup_low16(tmp);
2604 break;
2605 case 2:
08307563 2606 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8e18cde3
PM
2607 break;
2608 default: /* Avoid compiler warnings. */
2609 abort();
2610 }
2611 return tmp;
2612}
2613
a1c7273b 2614/* Disassemble a VFP instruction. Returns nonzero if an error occurred
b7bcbe95 2615 (i.e. an undefined instruction). */
0ecb72a5 2616static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
b7bcbe95
FB
2617{
2618 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2619 int dp, veclen;
39d5492a
PM
2620 TCGv_i32 addr;
2621 TCGv_i32 tmp;
2622 TCGv_i32 tmp2;
b7bcbe95 2623
40f137e1
PB
2624 if (!arm_feature(env, ARM_FEATURE_VFP))
2625 return 1;
2626
5df8bac1 2627 if (!s->vfp_enabled) {
9ee6e8bb 2628 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2629 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2630 return 1;
2631 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2632 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2633 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2634 return 1;
2635 }
b7bcbe95
FB
2636 dp = ((insn & 0xf00) == 0xb00);
2637 switch ((insn >> 24) & 0xf) {
2638 case 0xe:
2639 if (insn & (1 << 4)) {
2640 /* single register transfer */
b7bcbe95
FB
2641 rd = (insn >> 12) & 0xf;
2642 if (dp) {
9ee6e8bb
PB
2643 int size;
2644 int pass;
2645
2646 VFP_DREG_N(rn, insn);
2647 if (insn & 0xf)
b7bcbe95 2648 return 1;
9ee6e8bb
PB
2649 if (insn & 0x00c00060
2650 && !arm_feature(env, ARM_FEATURE_NEON))
2651 return 1;
2652
2653 pass = (insn >> 21) & 1;
2654 if (insn & (1 << 22)) {
2655 size = 0;
2656 offset = ((insn >> 5) & 3) * 8;
2657 } else if (insn & (1 << 5)) {
2658 size = 1;
2659 offset = (insn & (1 << 6)) ? 16 : 0;
2660 } else {
2661 size = 2;
2662 offset = 0;
2663 }
18c9b560 2664 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2665 /* vfp->arm */
ad69471c 2666 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2667 switch (size) {
2668 case 0:
9ee6e8bb 2669 if (offset)
ad69471c 2670 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2671 if (insn & (1 << 23))
ad69471c 2672 gen_uxtb(tmp);
9ee6e8bb 2673 else
ad69471c 2674 gen_sxtb(tmp);
9ee6e8bb
PB
2675 break;
2676 case 1:
9ee6e8bb
PB
2677 if (insn & (1 << 23)) {
2678 if (offset) {
ad69471c 2679 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2680 } else {
ad69471c 2681 gen_uxth(tmp);
9ee6e8bb
PB
2682 }
2683 } else {
2684 if (offset) {
ad69471c 2685 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2686 } else {
ad69471c 2687 gen_sxth(tmp);
9ee6e8bb
PB
2688 }
2689 }
2690 break;
2691 case 2:
9ee6e8bb
PB
2692 break;
2693 }
ad69471c 2694 store_reg(s, rd, tmp);
b7bcbe95
FB
2695 } else {
2696 /* arm->vfp */
ad69471c 2697 tmp = load_reg(s, rd);
9ee6e8bb
PB
2698 if (insn & (1 << 23)) {
2699 /* VDUP */
2700 if (size == 0) {
ad69471c 2701 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2702 } else if (size == 1) {
ad69471c 2703 gen_neon_dup_low16(tmp);
9ee6e8bb 2704 }
cbbccffc 2705 for (n = 0; n <= pass * 2; n++) {
7d1b0095 2706 tmp2 = tcg_temp_new_i32();
cbbccffc
PB
2707 tcg_gen_mov_i32(tmp2, tmp);
2708 neon_store_reg(rn, n, tmp2);
2709 }
2710 neon_store_reg(rn, n, tmp);
9ee6e8bb
PB
2711 } else {
2712 /* VMOV */
2713 switch (size) {
2714 case 0:
ad69471c 2715 tmp2 = neon_load_reg(rn, pass);
d593c48e 2716 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
7d1b0095 2717 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2718 break;
2719 case 1:
ad69471c 2720 tmp2 = neon_load_reg(rn, pass);
d593c48e 2721 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
7d1b0095 2722 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
2723 break;
2724 case 2:
9ee6e8bb
PB
2725 break;
2726 }
ad69471c 2727 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2728 }
b7bcbe95 2729 }
9ee6e8bb
PB
2730 } else { /* !dp */
2731 if ((insn & 0x6f) != 0x00)
2732 return 1;
2733 rn = VFP_SREG_N(insn);
18c9b560 2734 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2735 /* vfp->arm */
2736 if (insn & (1 << 21)) {
2737 /* system register */
40f137e1 2738 rn >>= 1;
9ee6e8bb 2739
b7bcbe95 2740 switch (rn) {
40f137e1 2741 case ARM_VFP_FPSID:
4373f3ce 2742 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2743 VFP3 restricts all id registers to privileged
2744 accesses. */
2745 if (IS_USER(s)
2746 && arm_feature(env, ARM_FEATURE_VFP3))
2747 return 1;
4373f3ce 2748 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2749 break;
40f137e1 2750 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2751 if (IS_USER(s))
2752 return 1;
4373f3ce 2753 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2754 break;
40f137e1
PB
2755 case ARM_VFP_FPINST:
2756 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2757 /* Not present in VFP3. */
2758 if (IS_USER(s)
2759 || arm_feature(env, ARM_FEATURE_VFP3))
2760 return 1;
4373f3ce 2761 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2762 break;
40f137e1 2763 case ARM_VFP_FPSCR:
601d70b9 2764 if (rd == 15) {
4373f3ce
PB
2765 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2766 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2767 } else {
7d1b0095 2768 tmp = tcg_temp_new_i32();
4373f3ce
PB
2769 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2770 }
b7bcbe95 2771 break;
9ee6e8bb
PB
2772 case ARM_VFP_MVFR0:
2773 case ARM_VFP_MVFR1:
2774 if (IS_USER(s)
06ed5d66 2775 || !arm_feature(env, ARM_FEATURE_MVFR))
9ee6e8bb 2776 return 1;
4373f3ce 2777 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2778 break;
b7bcbe95
FB
2779 default:
2780 return 1;
2781 }
2782 } else {
2783 gen_mov_F0_vreg(0, rn);
4373f3ce 2784 tmp = gen_vfp_mrs();
b7bcbe95
FB
2785 }
2786 if (rd == 15) {
b5ff1b31 2787 /* Set the 4 flag bits in the CPSR. */
4373f3ce 2788 gen_set_nzcv(tmp);
7d1b0095 2789 tcg_temp_free_i32(tmp);
4373f3ce
PB
2790 } else {
2791 store_reg(s, rd, tmp);
2792 }
b7bcbe95
FB
2793 } else {
2794 /* arm->vfp */
b7bcbe95 2795 if (insn & (1 << 21)) {
40f137e1 2796 rn >>= 1;
b7bcbe95
FB
2797 /* system register */
2798 switch (rn) {
40f137e1 2799 case ARM_VFP_FPSID:
9ee6e8bb
PB
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
b7bcbe95
FB
2802 /* Writes are ignored. */
2803 break;
40f137e1 2804 case ARM_VFP_FPSCR:
e4c1cfa5 2805 tmp = load_reg(s, rd);
4373f3ce 2806 gen_helper_vfp_set_fpscr(cpu_env, tmp);
7d1b0095 2807 tcg_temp_free_i32(tmp);
b5ff1b31 2808 gen_lookup_tb(s);
b7bcbe95 2809 break;
40f137e1 2810 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2811 if (IS_USER(s))
2812 return 1;
71b3c3de
JR
2813 /* TODO: VFP subarchitecture support.
2814 * For now, keep only the EN bit. */
e4c1cfa5 2815 tmp = load_reg(s, rd);
71b3c3de 2816 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
4373f3ce 2817 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2818 gen_lookup_tb(s);
2819 break;
2820 case ARM_VFP_FPINST:
2821 case ARM_VFP_FPINST2:
e4c1cfa5 2822 tmp = load_reg(s, rd);
4373f3ce 2823 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2824 break;
b7bcbe95
FB
2825 default:
2826 return 1;
2827 }
2828 } else {
e4c1cfa5 2829 tmp = load_reg(s, rd);
4373f3ce 2830 gen_vfp_msr(tmp);
b7bcbe95
FB
2831 gen_mov_vreg_F0(0, rn);
2832 }
2833 }
2834 }
2835 } else {
2836 /* data processing */
2837 /* The opcode is in bits 23, 21, 20 and 6. */
2838 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2839 if (dp) {
2840 if (op == 15) {
2841 /* rn is opcode */
2842 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2843 } else {
2844 /* rn is register number */
9ee6e8bb 2845 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2846 }
2847
04595bf6 2848 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
b7bcbe95 2849 /* Integer or single precision destination. */
9ee6e8bb 2850 rd = VFP_SREG_D(insn);
b7bcbe95 2851 } else {
9ee6e8bb 2852 VFP_DREG_D(rd, insn);
b7bcbe95 2853 }
04595bf6
PM
2854 if (op == 15 &&
2855 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2856 /* VCVT from int is always from S reg regardless of dp bit.
2857 * VCVT with immediate frac_bits has the same format as SREG_M
2858 */
2859 rm = VFP_SREG_M(insn);
b7bcbe95 2860 } else {
9ee6e8bb 2861 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2862 }
2863 } else {
9ee6e8bb 2864 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2865 if (op == 15 && rn == 15) {
2866 /* Double precision destination. */
9ee6e8bb
PB
2867 VFP_DREG_D(rd, insn);
2868 } else {
2869 rd = VFP_SREG_D(insn);
2870 }
04595bf6
PM
2871 /* NB that we implicitly rely on the encoding for the frac_bits
2872 * in VCVT of fixed to float being the same as that of an SREG_M
2873 */
9ee6e8bb 2874 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2875 }
2876
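/* VFP short vectors: vec_len and vec_stride reflect FPSCR.LEN and
 * FPSCR.STRIDE, which turn a scalar op into an implicit loop over a
 * register bank. */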
69d1fc22 2877 veclen = s->vec_len;
b7bcbe95
FB
2878 if (op == 15 && rn > 3)
2879 veclen = 0;
2880
2881 /* Shut up compiler warnings. */
2882 delta_m = 0;
2883 delta_d = 0;
2884 bank_mask = 0;
3b46e624 2885
b7bcbe95
FB
2886 if (veclen > 0) {
2887 if (dp)
2888 bank_mask = 0xc;
2889 else
2890 bank_mask = 0x18;
2891
2892 /* Figure out what type of vector operation this is. */
2893 if ((rd & bank_mask) == 0) {
2894 /* scalar */
2895 veclen = 0;
2896 } else {
2897 if (dp)
69d1fc22 2898 delta_d = (s->vec_stride >> 1) + 1;
b7bcbe95 2899 else
69d1fc22 2900 delta_d = s->vec_stride + 1;
b7bcbe95
FB
2901
2902 if ((rm & bank_mask) == 0) {
2903 /* mixed scalar/vector */
2904 delta_m = 0;
2905 } else {
2906 /* vector */
2907 delta_m = delta_d;
2908 }
2909 }
2910 }
2911
2912 /* Load the initial operands. */
2913 if (op == 15) {
2914 switch (rn) {
2915 case 16:
2916 case 17:
2917 /* Integer source */
2918 gen_mov_F0_vreg(0, rm);
2919 break;
2920 case 8:
2921 case 9:
2922 /* Compare */
2923 gen_mov_F0_vreg(dp, rd);
2924 gen_mov_F1_vreg(dp, rm);
2925 break;
2926 case 10:
2927 case 11:
2928 /* Compare with zero */
2929 gen_mov_F0_vreg(dp, rd);
2930 gen_vfp_F1_ld0(dp);
2931 break;
9ee6e8bb
PB
2932 case 20:
2933 case 21:
2934 case 22:
2935 case 23:
644ad806
PB
2936 case 28:
2937 case 29:
2938 case 30:
2939 case 31:
9ee6e8bb
PB
2940 /* Source and destination the same. */
2941 gen_mov_F0_vreg(dp, rd);
2942 break;
6e0c0ed1
PM
2943 case 4:
2944 case 5:
2945 case 6:
2946 case 7:
2947 /* VCVTB, VCVTT: only present with the halfprec extension,
2948 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2949 */
2950 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2951 return 1;
2952 }
2953 /* Otherwise fall through */
b7bcbe95
FB
2954 default:
2955 /* One source operand. */
2956 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2957 break;
b7bcbe95
FB
2958 }
2959 } else {
2960 /* Two source operands. */
2961 gen_mov_F0_vreg(dp, rn);
2962 gen_mov_F1_vreg(dp, rm);
2963 }
2964
2965 for (;;) {
2966 /* Perform the calculation. */
2967 switch (op) {
605a6aed
PM
2968 case 0: /* VMLA: fd + (fn * fm) */
2969 /* Note that order of inputs to the add matters for NaNs */
2970 gen_vfp_F1_mul(dp);
2971 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2972 gen_vfp_add(dp);
2973 break;
605a6aed 2974 case 1: /* VMLS: fd + -(fn * fm) */
b7bcbe95 2975 gen_vfp_mul(dp);
605a6aed
PM
2976 gen_vfp_F1_neg(dp);
2977 gen_mov_F0_vreg(dp, rd);
b7bcbe95
FB
2978 gen_vfp_add(dp);
2979 break;
605a6aed
PM
2980 case 2: /* VNMLS: -fd + (fn * fm) */
2981 /* Note that it isn't valid to replace (-A + B) with (B - A)
2982 * or similar plausible looking simplifications
2983 * because this will give wrong results for NaNs.
2984 */
2985 gen_vfp_F1_mul(dp);
2986 gen_mov_F0_vreg(dp, rd);
2987 gen_vfp_neg(dp);
2988 gen_vfp_add(dp);
b7bcbe95 2989 break;
605a6aed 2990 case 3: /* VNMLA: -fd + -(fn * fm) */
b7bcbe95 2991 gen_vfp_mul(dp);
605a6aed
PM
2992 gen_vfp_F1_neg(dp);
2993 gen_mov_F0_vreg(dp, rd);
b7bcbe95 2994 gen_vfp_neg(dp);
605a6aed 2995 gen_vfp_add(dp);
b7bcbe95
FB
2996 break;
2997 case 4: /* mul: fn * fm */
2998 gen_vfp_mul(dp);
2999 break;
3000 case 5: /* nmul: -(fn * fm) */
3001 gen_vfp_mul(dp);
3002 gen_vfp_neg(dp);
3003 break;
3004 case 6: /* add: fn + fm */
3005 gen_vfp_add(dp);
3006 break;
3007 case 7: /* sub: fn - fm */
3008 gen_vfp_sub(dp);
3009 break;
3010 case 8: /* div: fn / fm */
3011 gen_vfp_div(dp);
3012 break;
da97f52c
PM
3013 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3014 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3015 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3016 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3017 /* These are fused multiply-add, and must be done as one
3018 * floating point operation with no rounding between the
3019 * multiplication and addition steps.
3020 * NB that doing the negations here as separate steps is
3021 * correct : an input NaN should come out with its sign bit
3022 * flipped if it is a negated-input.
3023 */
3024 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3025 return 1;
3026 }
3027 if (dp) {
3028 TCGv_ptr fpst;
3029 TCGv_i64 frd;
3030 if (op & 1) {
3031 /* VFNMS, VFMS */
3032 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3033 }
3034 frd = tcg_temp_new_i64();
3035 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3036 if (op & 2) {
3037 /* VFNMA, VFNMS */
3038 gen_helper_vfp_negd(frd, frd);
3039 }
3040 fpst = get_fpstatus_ptr(0);
3041 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3042 cpu_F1d, frd, fpst);
3043 tcg_temp_free_ptr(fpst);
3044 tcg_temp_free_i64(frd);
3045 } else {
3046 TCGv_ptr fpst;
3047 TCGv_i32 frd;
3048 if (op & 1) {
3049 /* VFNMS, VFMS */
3050 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3051 }
3052 frd = tcg_temp_new_i32();
3053 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3054 if (op & 2) {
3055 gen_helper_vfp_negs(frd, frd);
3056 }
3057 fpst = get_fpstatus_ptr(0);
3058 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3059 cpu_F1s, frd, fpst);
3060 tcg_temp_free_ptr(fpst);
3061 tcg_temp_free_i32(frd);
3062 }
3063 break;
9ee6e8bb
PB
3064 case 14: /* fconst */
3065 if (!arm_feature(env, ARM_FEATURE_VFP3))
3066 return 1;
3067
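/* Expand the 8-bit encoded immediate into a single-precision value or the
 * top word of a double (whose low word is zero). */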
3068 n = (insn << 12) & 0x80000000;
3069 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3070 if (dp) {
3071 if (i & 0x40)
3072 i |= 0x3f80;
3073 else
3074 i |= 0x4000;
3075 n |= i << 16;
4373f3ce 3076 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3077 } else {
3078 if (i & 0x40)
3079 i |= 0x780;
3080 else
3081 i |= 0x800;
3082 n |= i << 19;
5b340b51 3083 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3084 }
9ee6e8bb 3085 break;
b7bcbe95
FB
3086 case 15: /* extension space */
3087 switch (rn) {
3088 case 0: /* cpy */
3089 /* no-op */
3090 break;
3091 case 1: /* abs */
3092 gen_vfp_abs(dp);
3093 break;
3094 case 2: /* neg */
3095 gen_vfp_neg(dp);
3096 break;
3097 case 3: /* sqrt */
3098 gen_vfp_sqrt(dp);
3099 break;
60011498 3100 case 4: /* vcvtb.f32.f16 */
60011498
PB
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3104 tcg_temp_free_i32(tmp);
60011498
PB
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
60011498
PB
3107 tmp = gen_vfp_mrs();
3108 tcg_gen_shri_i32(tmp, tmp, 16);
3109 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
7d1b0095 3110 tcg_temp_free_i32(tmp);
60011498
PB
3111 break;
3112 case 6: /* vcvtb.f16.f32 */
7d1b0095 3113 tmp = tcg_temp_new_i32();
60011498
PB
3114 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3115 gen_mov_F0_vreg(0, rd);
3116 tmp2 = gen_vfp_mrs();
3117 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3118 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3119 tcg_temp_free_i32(tmp2);
60011498
PB
3120 gen_vfp_msr(tmp);
3121 break;
3122 case 7: /* vcvtt.f16.f32 */
7d1b0095 3123 tmp = tcg_temp_new_i32();
60011498
PB
3124 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3125 tcg_gen_shli_i32(tmp, tmp, 16);
3126 gen_mov_F0_vreg(0, rd);
3127 tmp2 = gen_vfp_mrs();
3128 tcg_gen_ext16u_i32(tmp2, tmp2);
3129 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3130 tcg_temp_free_i32(tmp2);
60011498
PB
3131 gen_vfp_msr(tmp);
3132 break;
b7bcbe95
FB
3133 case 8: /* cmp */
3134 gen_vfp_cmp(dp);
3135 break;
3136 case 9: /* cmpe */
3137 gen_vfp_cmpe(dp);
3138 break;
3139 case 10: /* cmpz */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 11: /* cmpez */
3143 gen_vfp_F1_ld0(dp);
3144 gen_vfp_cmpe(dp);
3145 break;
3146 case 15: /* single<->double conversion */
3147 if (dp)
4373f3ce 3148 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3149 else
4373f3ce 3150 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3151 break;
3152 case 16: /* fuito */
5500b06c 3153 gen_vfp_uito(dp, 0);
b7bcbe95
FB
3154 break;
3155 case 17: /* fsito */
5500b06c 3156 gen_vfp_sito(dp, 0);
b7bcbe95 3157 break;
9ee6e8bb
PB
3158 case 20: /* fshto */
3159 if (!arm_feature(env, ARM_FEATURE_VFP3))
3160 return 1;
5500b06c 3161 gen_vfp_shto(dp, 16 - rm, 0);
9ee6e8bb
PB
3162 break;
3163 case 21: /* fslto */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3165 return 1;
5500b06c 3166 gen_vfp_slto(dp, 32 - rm, 0);
9ee6e8bb
PB
3167 break;
3168 case 22: /* fuhto */
3169 if (!arm_feature(env, ARM_FEATURE_VFP3))
3170 return 1;
5500b06c 3171 gen_vfp_uhto(dp, 16 - rm, 0);
9ee6e8bb
PB
3172 break;
3173 case 23: /* fulto */
3174 if (!arm_feature(env, ARM_FEATURE_VFP3))
3175 return 1;
5500b06c 3176 gen_vfp_ulto(dp, 32 - rm, 0);
9ee6e8bb 3177 break;
b7bcbe95 3178 case 24: /* ftoui */
5500b06c 3179 gen_vfp_toui(dp, 0);
b7bcbe95
FB
3180 break;
3181 case 25: /* ftouiz */
5500b06c 3182 gen_vfp_touiz(dp, 0);
b7bcbe95
FB
3183 break;
3184 case 26: /* ftosi */
5500b06c 3185 gen_vfp_tosi(dp, 0);
b7bcbe95
FB
3186 break;
3187 case 27: /* ftosiz */
5500b06c 3188 gen_vfp_tosiz(dp, 0);
b7bcbe95 3189 break;
9ee6e8bb
PB
3190 case 28: /* ftosh */
3191 if (!arm_feature(env, ARM_FEATURE_VFP3))
3192 return 1;
5500b06c 3193 gen_vfp_tosh(dp, 16 - rm, 0);
9ee6e8bb
PB
3194 break;
3195 case 29: /* ftosl */
3196 if (!arm_feature(env, ARM_FEATURE_VFP3))
3197 return 1;
5500b06c 3198 gen_vfp_tosl(dp, 32 - rm, 0);
9ee6e8bb
PB
3199 break;
3200 case 30: /* ftouh */
3201 if (!arm_feature(env, ARM_FEATURE_VFP3))
3202 return 1;
5500b06c 3203 gen_vfp_touh(dp, 16 - rm, 0);
9ee6e8bb
PB
3204 break;
3205 case 31: /* ftoul */
3206 if (!arm_feature(env, ARM_FEATURE_VFP3))
3207 return 1;
5500b06c 3208 gen_vfp_toul(dp, 32 - rm, 0);
9ee6e8bb 3209 break;
b7bcbe95 3210 default: /* undefined */
b7bcbe95
FB
3211 return 1;
3212 }
3213 break;
3214 default: /* undefined */
b7bcbe95
FB
3215 return 1;
3216 }
3217
3218 /* Write back the result. */
3219 if (op == 15 && (rn >= 8 && rn <= 11))
3220 ; /* Comparison, do nothing. */
04595bf6
PM
3221 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3222 /* VCVT double to int: always integer result. */
b7bcbe95
FB
3223 gen_mov_vreg_F0(0, rd);
3224 else if (op == 15 && rn == 15)
3225 /* conversion */
3226 gen_mov_vreg_F0(!dp, rd);
3227 else
3228 gen_mov_vreg_F0(dp, rd);
3229
3230 /* break out of the loop if we have finished */
3231 if (veclen == 0)
3232 break;
3233
3234 if (op == 15 && delta_m == 0) {
3235 /* single source one-many */
3236 while (veclen--) {
3237 rd = ((rd + delta_d) & (bank_mask - 1))
3238 | (rd & bank_mask);
3239 gen_mov_vreg_F0(dp, rd);
3240 }
3241 break;
3242 }
3243 /* Setup the next operands. */
3244 veclen--;
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247
3248 if (op == 15) {
3249 /* One source operand. */
3250 rm = ((rm + delta_m) & (bank_mask - 1))
3251 | (rm & bank_mask);
3252 gen_mov_F0_vreg(dp, rm);
3253 } else {
3254 /* Two source operands. */
3255 rn = ((rn + delta_d) & (bank_mask - 1))
3256 | (rn & bank_mask);
3257 gen_mov_F0_vreg(dp, rn);
3258 if (delta_m) {
3259 rm = ((rm + delta_m) & (bank_mask - 1))
3260 | (rm & bank_mask);
3261 gen_mov_F1_vreg(dp, rm);
3262 }
3263 }
3264 }
3265 }
3266 break;
3267 case 0xc:
3268 case 0xd:
8387da81 3269 if ((insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3270 /* two-register transfer */
3271 rn = (insn >> 16) & 0xf;
3272 rd = (insn >> 12) & 0xf;
3273 if (dp) {
9ee6e8bb
PB
3274 VFP_DREG_M(rm, insn);
3275 } else {
3276 rm = VFP_SREG_M(insn);
3277 }
b7bcbe95 3278
18c9b560 3279 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3280 /* vfp->arm */
3281 if (dp) {
4373f3ce
PB
3282 gen_mov_F0_vreg(0, rm * 2);
3283 tmp = gen_vfp_mrs();
3284 store_reg(s, rd, tmp);
3285 gen_mov_F0_vreg(0, rm * 2 + 1);
3286 tmp = gen_vfp_mrs();
3287 store_reg(s, rn, tmp);
b7bcbe95
FB
3288 } else {
3289 gen_mov_F0_vreg(0, rm);
4373f3ce 3290 tmp = gen_vfp_mrs();
8387da81 3291 store_reg(s, rd, tmp);
b7bcbe95 3292 gen_mov_F0_vreg(0, rm + 1);
4373f3ce 3293 tmp = gen_vfp_mrs();
8387da81 3294 store_reg(s, rn, tmp);
b7bcbe95
FB
3295 }
3296 } else {
3297 /* arm->vfp */
3298 if (dp) {
4373f3ce
PB
3299 tmp = load_reg(s, rd);
3300 gen_vfp_msr(tmp);
3301 gen_mov_vreg_F0(0, rm * 2);
3302 tmp = load_reg(s, rn);
3303 gen_vfp_msr(tmp);
3304 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3305 } else {
8387da81 3306 tmp = load_reg(s, rd);
4373f3ce 3307 gen_vfp_msr(tmp);
b7bcbe95 3308 gen_mov_vreg_F0(0, rm);
8387da81 3309 tmp = load_reg(s, rn);
4373f3ce 3310 gen_vfp_msr(tmp);
b7bcbe95
FB
3311 gen_mov_vreg_F0(0, rm + 1);
3312 }
3313 }
3314 } else {
3315 /* Load/store */
3316 rn = (insn >> 16) & 0xf;
3317 if (dp)
9ee6e8bb 3318 VFP_DREG_D(rd, insn);
b7bcbe95 3319 else
9ee6e8bb 3320 rd = VFP_SREG_D(insn);
b7bcbe95
FB
3321 if ((insn & 0x01200000) == 0x01000000) {
3322 /* Single load/store */
3323 offset = (insn & 0xff) << 2;
3324 if ((insn & (1 << 23)) == 0)
3325 offset = -offset;
934814f1
PM
3326 if (s->thumb && rn == 15) {
3327 /* This is actually UNPREDICTABLE */
3328 addr = tcg_temp_new_i32();
3329 tcg_gen_movi_i32(addr, s->pc & ~2);
3330 } else {
3331 addr = load_reg(s, rn);
3332 }
312eea9f 3333 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3334 if (insn & (1 << 20)) {
312eea9f 3335 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3336 gen_mov_vreg_F0(dp, rd);
3337 } else {
3338 gen_mov_F0_vreg(dp, rd);
312eea9f 3339 gen_vfp_st(s, dp, addr);
b7bcbe95 3340 }
7d1b0095 3341 tcg_temp_free_i32(addr);
b7bcbe95
FB
3342 } else {
3343 /* load/store multiple */
934814f1 3344 int w = insn & (1 << 21);
b7bcbe95
FB
3345 if (dp)
3346 n = (insn >> 1) & 0x7f;
3347 else
3348 n = insn & 0xff;
3349
934814f1
PM
3350 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3351 /* P == U , W == 1 => UNDEF */
3352 return 1;
3353 }
3354 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3355 /* UNPREDICTABLE cases for bad immediates: we choose to
3356 * UNDEF to avoid generating huge numbers of TCG ops
3357 */
3358 return 1;
3359 }
3360 if (rn == 15 && w) {
3361 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
3362 return 1;
3363 }
3364
3365 if (s->thumb && rn == 15) {
3366 /* This is actually UNPREDICTABLE */
3367 addr = tcg_temp_new_i32();
3368 tcg_gen_movi_i32(addr, s->pc & ~2);
3369 } else {
3370 addr = load_reg(s, rn);
3371 }
b7bcbe95 3372 if (insn & (1 << 24)) /* pre-decrement */
312eea9f 3373 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
b7bcbe95
FB
3374
3375 if (dp)
3376 offset = 8;
3377 else
3378 offset = 4;
3379 for (i = 0; i < n; i++) {
18c9b560 3380 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3381 /* load */
312eea9f 3382 gen_vfp_ld(s, dp, addr);
b7bcbe95
FB
3383 gen_mov_vreg_F0(dp, rd + i);
3384 } else {
3385 /* store */
3386 gen_mov_F0_vreg(dp, rd + i);
312eea9f 3387 gen_vfp_st(s, dp, addr);
b7bcbe95 3388 }
312eea9f 3389 tcg_gen_addi_i32(addr, addr, offset);
b7bcbe95 3390 }
934814f1 3391 if (w) {
b7bcbe95
FB
3392 /* writeback */
3393 if (insn & (1 << 24))
3394 offset = -offset * n;
3395 else if (dp && (insn & 1))
3396 offset = 4;
3397 else
3398 offset = 0;
3399
3400 if (offset != 0)
312eea9f
FN
3401 tcg_gen_addi_i32(addr, addr, offset);
3402 store_reg(s, rn, addr);
3403 } else {
7d1b0095 3404 tcg_temp_free_i32(addr);
b7bcbe95
FB
3405 }
3406 }
3407 }
3408 break;
3409 default:
3410 /* Should never happen. */
3411 return 1;
3412 }
3413 return 0;
3414}
3415
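/* Jump to "dest": chain directly to the next TB only when it lies in the
 * same guest page as the current TB, otherwise update the PC and exit. */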
0a2461fa 3416static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3417{
6e256c93
FB
3418 TranslationBlock *tb;
3419
3420 tb = s->tb;
3421 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3422 tcg_gen_goto_tb(n);
eaed129d 3423 gen_set_pc_im(s, dest);
8cfd0495 3424 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3425 } else {
eaed129d 3426 gen_set_pc_im(s, dest);
57fec1fe 3427 tcg_gen_exit_tb(0);
6e256c93 3428 }
c53be334
FB
3429}
3430
8aaca4c0
FB
3431static inline void gen_jmp (DisasContext *s, uint32_t dest)
3432{
551bd27f 3433 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3434 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3435 if (s->thumb)
d9ba4830
PB
3436 dest |= 1;
3437 gen_bx_im(s, dest);
8aaca4c0 3438 } else {
6e256c93 3439 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3440 s->is_jmp = DISAS_TB_JUMP;
3441 }
3442}
3443
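/* Signed 16x16->32 multiply for SMULxy/SMLAxy: x and y select the top (1)
 * or bottom (0) halfword of t0 and t1 respectively. */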
39d5492a 3444static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3445{
ee097184 3446 if (x)
d9ba4830 3447 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3448 else
d9ba4830 3449 gen_sxth(t0);
ee097184 3450 if (y)
d9ba4830 3451 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3452 else
d9ba4830
PB
3453 gen_sxth(t1);
3454 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3455}
3456
3457/* Return the mask of PSR bits set by a MSR instruction. */
0ecb72a5 3458static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3459 uint32_t mask;
3460
3461 mask = 0;
3462 if (flags & (1 << 0))
3463 mask |= 0xff;
3464 if (flags & (1 << 1))
3465 mask |= 0xff00;
3466 if (flags & (1 << 2))
3467 mask |= 0xff0000;
3468 if (flags & (1 << 3))
3469 mask |= 0xff000000;
9ee6e8bb 3470
2ae23e75 3471 /* Mask out undefined bits. */
9ee6e8bb 3472 mask &= ~CPSR_RESERVED;
be5e7a76
DES
3473 if (!arm_feature(env, ARM_FEATURE_V4T))
3474 mask &= ~CPSR_T;
3475 if (!arm_feature(env, ARM_FEATURE_V5))
3476 mask &= ~CPSR_Q; /* V5TE in reality */
9ee6e8bb 3477 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3478 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3479 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3480 mask &= ~CPSR_IT;
9ee6e8bb 3481 /* Mask out execution state bits. */
2ae23e75 3482 if (!spsr)
e160c51c 3483 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3484 /* Mask out privileged bits. */
3485 if (IS_USER(s))
9ee6e8bb 3486 mask &= CPSR_USER;
b5ff1b31
FB
3487 return mask;
3488}
3489
2fbac54b 3490/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 3491static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 3492{
39d5492a 3493 TCGv_i32 tmp;
b5ff1b31
FB
3494 if (spsr) {
3495 /* ??? This is also undefined in system mode. */
3496 if (IS_USER(s))
3497 return 1;
d9ba4830
PB
3498
3499 tmp = load_cpu_field(spsr);
3500 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
3501 tcg_gen_andi_i32(t0, t0, mask);
3502 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 3503 store_cpu_field(tmp, spsr);
b5ff1b31 3504 } else {
2fbac54b 3505 gen_set_cpsr(t0, mask);
b5ff1b31 3506 }
7d1b0095 3507 tcg_temp_free_i32(t0);
b5ff1b31
FB
3508 gen_lookup_tb(s);
3509 return 0;
3510}
3511
2fbac54b
FN
3512/* Returns nonzero if access to the PSR is not permitted. */
3513static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3514{
39d5492a 3515 TCGv_i32 tmp;
7d1b0095 3516 tmp = tcg_temp_new_i32();
2fbac54b
FN
3517 tcg_gen_movi_i32(tmp, val);
3518 return gen_set_psr(s, mask, spsr, tmp);
3519}
3520
e9bb4aa9 3521/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 3522static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 3523{
39d5492a 3524 TCGv_i32 tmp;
e9bb4aa9 3525 store_reg(s, 15, pc);
d9ba4830
PB
3526 tmp = load_cpu_field(spsr);
3527 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 3528 tcg_temp_free_i32(tmp);
b5ff1b31
FB
3529 s->is_jmp = DISAS_UPDATE;
3530}
3531
b0109805 3532/* Generate a v6 exception return. Marks both values as dead. */
39d5492a 3533static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
2c0262af 3534{
b0109805 3535 gen_set_cpsr(cpsr, 0xffffffff);
7d1b0095 3536 tcg_temp_free_i32(cpsr);
b0109805 3537 store_reg(s, 15, pc);
9ee6e8bb
PB
3538 s->is_jmp = DISAS_UPDATE;
3539}
3b46e624 3540
9ee6e8bb
PB
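/* Write the current IT-block state (condexec bits) back to the CPU state. */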
3541static inline void
3542gen_set_condexec (DisasContext *s)
3543{
3544 if (s->condexec_mask) {
8f01245e 3545 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
39d5492a 3546 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 3547 tcg_gen_movi_i32(tmp, val);
d9ba4830 3548 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3549 }
3550}
3b46e624 3551
bc4a0de0
PM
3552static void gen_exception_insn(DisasContext *s, int offset, int excp)
3553{
3554 gen_set_condexec(s);
eaed129d 3555 gen_set_pc_im(s, s->pc - offset);
bc4a0de0
PM
3556 gen_exception(excp);
3557 s->is_jmp = DISAS_JUMP;
3558}
3559
9ee6e8bb
PB
3560static void gen_nop_hint(DisasContext *s, int val)
3561{
3562 switch (val) {
3563 case 3: /* wfi */
eaed129d 3564 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
3565 s->is_jmp = DISAS_WFI;
3566 break;
3567 case 2: /* wfe */
3568 case 4: /* sev */
12b10571
MR
3569 case 5: /* sevl */
3570 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
3571 default: /* nop */
3572 break;
3573 }
3574}
99c475ab 3575
ad69471c 3576#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 3577
39d5492a 3578static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
3579{
3580 switch (size) {
dd8fbd78
FN
3581 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3582 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3583 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 3584 default: abort();
9ee6e8bb 3585 }
9ee6e8bb
PB
3586}
3587
39d5492a 3588static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
3589{
3590 switch (size) {
dd8fbd78
FN
3591 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3592 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3593 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
3594 default: return;
3595 }
3596}
3597
3598/* 32-bit pairwise ops end up the same as the elementwise versions. */
3599#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3600#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3601#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3602#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3603
ad69471c
PB
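/* Call the signed or unsigned 8/16/32-bit variant of a Neon helper, chosen
 * by the element size and the instruction's "u" bit. */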
3604#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3605 switch ((size << 1) | u) { \
3606 case 0: \
dd8fbd78 3607 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3608 break; \
3609 case 1: \
dd8fbd78 3610 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3611 break; \
3612 case 2: \
dd8fbd78 3613 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3614 break; \
3615 case 3: \
dd8fbd78 3616 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3617 break; \
3618 case 4: \
dd8fbd78 3619 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3620 break; \
3621 case 5: \
dd8fbd78 3622 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
ad69471c
PB
3623 break; \
3624 default: return 1; \
3625 }} while (0)
9ee6e8bb
PB
3626
3627#define GEN_NEON_INTEGER_OP(name) do { \
3628 switch ((size << 1) | u) { \
ad69471c 3629 case 0: \
dd8fbd78 3630 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
ad69471c
PB
3631 break; \
3632 case 1: \
dd8fbd78 3633 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
ad69471c
PB
3634 break; \
3635 case 2: \
dd8fbd78 3636 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
ad69471c
PB
3637 break; \
3638 case 3: \
dd8fbd78 3639 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
ad69471c
PB
3640 break; \
3641 case 4: \
dd8fbd78 3642 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
ad69471c
PB
3643 break; \
3644 case 5: \
dd8fbd78 3645 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
ad69471c 3646 break; \
9ee6e8bb
PB
3647 default: return 1; \
3648 }} while (0)
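/* In both macros above the switch index is (size << 1) | u: the even cases
 * (0, 2, 4) pick the signed 8/16/32-bit helpers and the odd cases (1, 3, 5)
 * the unsigned ones; 64-bit elements are handled separately by the callers.
 */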
3649
39d5492a 3650static TCGv_i32 neon_load_scratch(int scratch)
9ee6e8bb 3651{
39d5492a 3652 TCGv_i32 tmp = tcg_temp_new_i32();
dd8fbd78
FN
3653 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3654 return tmp;
9ee6e8bb
PB
3655}
3656
39d5492a 3657static void neon_store_scratch(int scratch, TCGv_i32 var)
9ee6e8bb 3658{
dd8fbd78 3659 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
7d1b0095 3660 tcg_temp_free_i32(var);
9ee6e8bb
PB
3661}
3662
39d5492a 3663static inline TCGv_i32 neon_get_scalar(int size, int reg)
9ee6e8bb 3664{
39d5492a 3665 TCGv_i32 tmp;
9ee6e8bb 3666 if (size == 1) {
0fad6efc
PM
3667 tmp = neon_load_reg(reg & 7, reg >> 4);
3668 if (reg & 8) {
dd8fbd78 3669 gen_neon_dup_high16(tmp);
0fad6efc
PM
3670 } else {
3671 gen_neon_dup_low16(tmp);
dd8fbd78 3672 }
0fad6efc
PM
3673 } else {
3674 tmp = neon_load_reg(reg & 15, reg >> 4);
9ee6e8bb 3675 }
dd8fbd78 3676 return tmp;
9ee6e8bb
PB
3677}
3678
02acedf9 3679static int gen_neon_unzip(int rd, int rm, int size, int q)
19457615 3680{
39d5492a 3681 TCGv_i32 tmp, tmp2;
600b828c 3682 if (!q && size == 2) {
02acedf9
PM
3683 return 1;
3684 }
3685 tmp = tcg_const_i32(rd);
3686 tmp2 = tcg_const_i32(rm);
3687 if (q) {
3688 switch (size) {
3689 case 0:
02da0b2d 3690 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3691 break;
3692 case 1:
02da0b2d 3693 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3694 break;
3695 case 2:
02da0b2d 3696 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
02acedf9
PM
3697 break;
3698 default:
3699 abort();
3700 }
3701 } else {
3702 switch (size) {
3703 case 0:
02da0b2d 3704 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
02acedf9
PM
3705 break;
3706 case 1:
02da0b2d 3707 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
02acedf9
PM
3708 break;
3709 default:
3710 abort();
3711 }
3712 }
3713 tcg_temp_free_i32(tmp);
3714 tcg_temp_free_i32(tmp2);
3715 return 0;
19457615
FN
3716}
3717
d68a6f3a 3718static int gen_neon_zip(int rd, int rm, int size, int q)
19457615 3719{
39d5492a 3720 TCGv_i32 tmp, tmp2;
600b828c 3721 if (!q && size == 2) {
d68a6f3a
PM
3722 return 1;
3723 }
3724 tmp = tcg_const_i32(rd);
3725 tmp2 = tcg_const_i32(rm);
3726 if (q) {
3727 switch (size) {
3728 case 0:
02da0b2d 3729 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3730 break;
3731 case 1:
02da0b2d 3732 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3733 break;
3734 case 2:
02da0b2d 3735 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
d68a6f3a
PM
3736 break;
3737 default:
3738 abort();
3739 }
3740 } else {
3741 switch (size) {
3742 case 0:
02da0b2d 3743 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
d68a6f3a
PM
3744 break;
3745 case 1:
02da0b2d 3746 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
d68a6f3a
PM
3747 break;
3748 default:
3749 abort();
3750 }
3751 }
3752 tcg_temp_free_i32(tmp);
3753 tcg_temp_free_i32(tmp2);
3754 return 0;
19457615
FN
3755}
3756
39d5492a 3757static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
19457615 3758{
39d5492a 3759 TCGv_i32 rd, tmp;
19457615 3760
7d1b0095
PM
3761 rd = tcg_temp_new_i32();
3762 tmp = tcg_temp_new_i32();
19457615
FN
3763
3764 tcg_gen_shli_i32(rd, t0, 8);
3765 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3766 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3767 tcg_gen_or_i32(rd, rd, tmp);
3768
3769 tcg_gen_shri_i32(t1, t1, 8);
3770 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3771 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3772 tcg_gen_or_i32(t1, t1, tmp);
3773 tcg_gen_mov_i32(t0, rd);
3774
7d1b0095
PM
3775 tcg_temp_free_i32(tmp);
3776 tcg_temp_free_i32(rd);
19457615
FN
3777}
3778
39d5492a 3779static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
19457615 3780{
39d5492a 3781 TCGv_i32 rd, tmp;
19457615 3782
7d1b0095
PM
3783 rd = tcg_temp_new_i32();
3784 tmp = tcg_temp_new_i32();
19457615
FN
3785
3786 tcg_gen_shli_i32(rd, t0, 16);
3787 tcg_gen_andi_i32(tmp, t1, 0xffff);
3788 tcg_gen_or_i32(rd, rd, tmp);
3789 tcg_gen_shri_i32(t1, t1, 16);
3790 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3791 tcg_gen_or_i32(t1, t1, tmp);
3792 tcg_gen_mov_i32(t0, rd);
3793
7d1b0095
PM
3794 tcg_temp_free_i32(tmp);
3795 tcg_temp_free_i32(rd);
19457615
FN
3796}
3797
3798
9ee6e8bb
PB
3799static struct {
3800 int nregs;
3801 int interleave;
3802 int spacing;
3803} neon_ls_element_type[11] = {
3804 {4, 4, 1},
3805 {4, 4, 2},
3806 {4, 1, 1},
3807 {4, 2, 1},
3808 {3, 3, 1},
3809 {3, 3, 2},
3810 {3, 1, 1},
3811 {1, 1, 1},
3812 {2, 2, 1},
3813 {2, 2, 2},
3814 {2, 1, 1}
3815};
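/* Indexed by the 'op' field of a VLDn/VSTn (multiple structures) insn:
 * e.g. op == 7 ({1, 1, 1}) is a single-register VLD1/VST1, while
 * op == 0 ({4, 4, 1}) transfers four fully interleaved registers.
 */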
3816
3817/* Translate a NEON load/store element instruction. Return nonzero if the
3818 instruction is invalid. */
0ecb72a5 3819static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
3820{
3821 int rd, rn, rm;
3822 int op;
3823 int nregs;
3824 int interleave;
84496233 3825 int spacing;
9ee6e8bb
PB
3826 int stride;
3827 int size;
3828 int reg;
3829 int pass;
3830 int load;
3831 int shift;
9ee6e8bb 3832 int n;
39d5492a
PM
3833 TCGv_i32 addr;
3834 TCGv_i32 tmp;
3835 TCGv_i32 tmp2;
84496233 3836 TCGv_i64 tmp64;
9ee6e8bb 3837
5df8bac1 3838 if (!s->vfp_enabled)
9ee6e8bb
PB
3839 return 1;
3840 VFP_DREG_D(rd, insn);
3841 rn = (insn >> 16) & 0xf;
3842 rm = insn & 0xf;
3843 load = (insn & (1 << 21)) != 0;
3844 if ((insn & (1 << 23)) == 0) {
3845 /* Load store all elements. */
3846 op = (insn >> 8) & 0xf;
3847 size = (insn >> 6) & 3;
84496233 3848 if (op > 10)
9ee6e8bb 3849 return 1;
f2dd89d0
PM
3850 /* Catch UNDEF cases for bad values of align field */
3851 switch (op & 0xc) {
3852 case 4:
3853 if (((insn >> 5) & 1) == 1) {
3854 return 1;
3855 }
3856 break;
3857 case 8:
3858 if (((insn >> 4) & 3) == 3) {
3859 return 1;
3860 }
3861 break;
3862 default:
3863 break;
3864 }
9ee6e8bb
PB
3865 nregs = neon_ls_element_type[op].nregs;
3866 interleave = neon_ls_element_type[op].interleave;
84496233
JR
3867 spacing = neon_ls_element_type[op].spacing;
3868 if (size == 3 && (interleave | spacing) != 1)
3869 return 1;
e318a60b 3870 addr = tcg_temp_new_i32();
dcc65026 3871 load_reg_var(s, addr, rn);
9ee6e8bb
PB
3872 stride = (1 << size) * interleave;
3873 for (reg = 0; reg < nregs; reg++) {
3874 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
dcc65026
AJ
3875 load_reg_var(s, addr, rn);
3876 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
9ee6e8bb 3877 } else if (interleave == 2 && nregs == 4 && reg == 2) {
dcc65026
AJ
3878 load_reg_var(s, addr, rn);
3879 tcg_gen_addi_i32(addr, addr, 1 << size);
9ee6e8bb 3880 }
84496233 3881 if (size == 3) {
8ed1237d 3882 tmp64 = tcg_temp_new_i64();
84496233 3883 if (load) {
08307563 3884 gen_aa32_ld64(tmp64, addr, IS_USER(s));
84496233 3885 neon_store_reg64(tmp64, rd);
84496233 3886 } else {
84496233 3887 neon_load_reg64(tmp64, rd);
08307563 3888 gen_aa32_st64(tmp64, addr, IS_USER(s));
84496233 3889 }
8ed1237d 3890 tcg_temp_free_i64(tmp64);
84496233
JR
3891 tcg_gen_addi_i32(addr, addr, stride);
3892 } else {
3893 for (pass = 0; pass < 2; pass++) {
3894 if (size == 2) {
3895 if (load) {
58ab8e96 3896 tmp = tcg_temp_new_i32();
08307563 3897 gen_aa32_ld32u(tmp, addr, IS_USER(s));
84496233
JR
3898 neon_store_reg(rd, pass, tmp);
3899 } else {
3900 tmp = neon_load_reg(rd, pass);
08307563 3901 gen_aa32_st32(tmp, addr, IS_USER(s));
58ab8e96 3902 tcg_temp_free_i32(tmp);
84496233 3903 }
1b2b1e54 3904 tcg_gen_addi_i32(addr, addr, stride);
84496233
JR
3905 } else if (size == 1) {
3906 if (load) {
58ab8e96 3907 tmp = tcg_temp_new_i32();
08307563 3908 gen_aa32_ld16u(tmp, addr, IS_USER(s));
84496233 3909 tcg_gen_addi_i32(addr, addr, stride);
58ab8e96 3910 tmp2 = tcg_temp_new_i32();
08307563 3911 gen_aa32_ld16u(tmp2, addr, IS_USER(s));
84496233 3912 tcg_gen_addi_i32(addr, addr, stride);
41ba8341
PB
3913 tcg_gen_shli_i32(tmp2, tmp2, 16);
3914 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 3915 tcg_temp_free_i32(tmp2);
84496233
JR
3916 neon_store_reg(rd, pass, tmp);
3917 } else {
3918 tmp = neon_load_reg(rd, pass);
7d1b0095 3919 tmp2 = tcg_temp_new_i32();
84496233 3920 tcg_gen_shri_i32(tmp2, tmp, 16);
08307563 3921 gen_aa32_st16(tmp, addr, IS_USER(s));
58ab8e96 3922 tcg_temp_free_i32(tmp);
84496233 3923 tcg_gen_addi_i32(addr, addr, stride);
08307563 3924 gen_aa32_st16(tmp2, addr, IS_USER(s));
58ab8e96 3925 tcg_temp_free_i32(tmp2);
1b2b1e54 3926 tcg_gen_addi_i32(addr, addr, stride);
9ee6e8bb 3927 }
84496233
JR
3928 } else /* size == 0 */ {
3929 if (load) {
39d5492a 3930 TCGV_UNUSED_I32(tmp2);
84496233 3931 for (n = 0; n < 4; n++) {
58ab8e96 3932 tmp = tcg_temp_new_i32();
08307563 3933 gen_aa32_ld8u(tmp, addr, IS_USER(s));
84496233
JR
3934 tcg_gen_addi_i32(addr, addr, stride);
3935 if (n == 0) {
3936 tmp2 = tmp;
3937 } else {
41ba8341
PB
3938 tcg_gen_shli_i32(tmp, tmp, n * 8);
3939 tcg_gen_or_i32(tmp2, tmp2, tmp);
7d1b0095 3940 tcg_temp_free_i32(tmp);
84496233 3941 }
9ee6e8bb 3942 }
84496233
JR
3943 neon_store_reg(rd, pass, tmp2);
3944 } else {
3945 tmp2 = neon_load_reg(rd, pass);
3946 for (n = 0; n < 4; n++) {
7d1b0095 3947 tmp = tcg_temp_new_i32();
84496233
JR
3948 if (n == 0) {
3949 tcg_gen_mov_i32(tmp, tmp2);
3950 } else {
3951 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3952 }
08307563 3953 gen_aa32_st8(tmp, addr, IS_USER(s));
58ab8e96 3954 tcg_temp_free_i32(tmp);
84496233
JR
3955 tcg_gen_addi_i32(addr, addr, stride);
3956 }
7d1b0095 3957 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
3958 }
3959 }
3960 }
3961 }
84496233 3962 rd += spacing;
9ee6e8bb 3963 }
e318a60b 3964 tcg_temp_free_i32(addr);
9ee6e8bb
PB
3965 stride = nregs * 8;
3966 } else {
3967 size = (insn >> 10) & 3;
3968 if (size == 3) {
3969 /* Load single element to all lanes. */
8e18cde3
PM
3970 int a = (insn >> 4) & 1;
3971 if (!load) {
9ee6e8bb 3972 return 1;
8e18cde3 3973 }
9ee6e8bb
PB
3974 size = (insn >> 6) & 3;
3975 nregs = ((insn >> 8) & 3) + 1;
8e18cde3
PM
3976
3977 if (size == 3) {
3978 if (nregs != 4 || a == 0) {
9ee6e8bb 3979 return 1;
99c475ab 3980 }
8e18cde3
PM
3981 /* For VLD4, size == 3 with a == 1 means 32-bit elements at 16-byte alignment */
3982 size = 2;
3983 }
3984 if (nregs == 1 && a == 1 && size == 0) {
3985 return 1;
3986 }
3987 if (nregs == 3 && a == 1) {
3988 return 1;
3989 }
e318a60b 3990 addr = tcg_temp_new_i32();
8e18cde3
PM
3991 load_reg_var(s, addr, rn);
3992 if (nregs == 1) {
3993 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3994 tmp = gen_load_and_replicate(s, addr, size);
3995 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3996 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3997 if (insn & (1 << 5)) {
3998 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3999 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4000 }
4001 tcg_temp_free_i32(tmp);
4002 } else {
4003 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4004 stride = (insn & (1 << 5)) ? 2 : 1;
4005 for (reg = 0; reg < nregs; reg++) {
4006 tmp = gen_load_and_replicate(s, addr, size);
4007 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4008 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4009 tcg_temp_free_i32(tmp);
4010 tcg_gen_addi_i32(addr, addr, 1 << size);
4011 rd += stride;
4012 }
9ee6e8bb 4013 }
e318a60b 4014 tcg_temp_free_i32(addr);
9ee6e8bb
PB
4015 stride = (1 << size) * nregs;
4016 } else {
4017 /* Single element. */
93262b16 4018 int idx = (insn >> 4) & 0xf;
9ee6e8bb
PB
4019 pass = (insn >> 7) & 1;
4020 switch (size) {
4021 case 0:
4022 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
4023 stride = 1;
4024 break;
4025 case 1:
4026 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
4027 stride = (insn & (1 << 5)) ? 2 : 1;
4028 break;
4029 case 2:
4030 shift = 0;
9ee6e8bb
PB
4031 stride = (insn & (1 << 6)) ? 2 : 1;
4032 break;
4033 default:
4034 abort();
4035 }
4036 nregs = ((insn >> 8) & 3) + 1;
93262b16
PM
4037 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4038 switch (nregs) {
4039 case 1:
4040 if (((idx & (1 << size)) != 0) ||
4041 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4042 return 1;
4043 }
4044 break;
4045 case 3:
4046 if ((idx & 1) != 0) {
4047 return 1;
4048 }
4049 /* fall through */
4050 case 2:
4051 if (size == 2 && (idx & 2) != 0) {
4052 return 1;
4053 }
4054 break;
4055 case 4:
4056 if ((size == 2) && ((idx & 3) == 3)) {
4057 return 1;
4058 }
4059 break;
4060 default:
4061 abort();
4062 }
4063 if ((rd + stride * (nregs - 1)) > 31) {
4064 /* Attempts to write off the end of the register file
4065 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4066 * the neon_load_reg() would write off the end of the array.
4067 */
4068 return 1;
4069 }
e318a60b 4070 addr = tcg_temp_new_i32();
dcc65026 4071 load_reg_var(s, addr, rn);
9ee6e8bb
PB
4072 for (reg = 0; reg < nregs; reg++) {
4073 if (load) {
58ab8e96 4074 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
4075 switch (size) {
4076 case 0:
08307563 4077 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4078 break;
4079 case 1:
08307563 4080 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4081 break;
4082 case 2:
08307563 4083 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 4084 break;
a50f5b91
PB
4085 default: /* Avoid compiler warnings. */
4086 abort();
9ee6e8bb
PB
4087 }
4088 if (size != 2) {
8f8e3aa4 4089 tmp2 = neon_load_reg(rd, pass);
d593c48e
AJ
4090 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4091 shift, size ? 16 : 8);
7d1b0095 4092 tcg_temp_free_i32(tmp2);
9ee6e8bb 4093 }
8f8e3aa4 4094 neon_store_reg(rd, pass, tmp);
9ee6e8bb 4095 } else { /* Store */
8f8e3aa4
PB
4096 tmp = neon_load_reg(rd, pass);
4097 if (shift)
4098 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
4099 switch (size) {
4100 case 0:
08307563 4101 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4102 break;
4103 case 1:
08307563 4104 gen_aa32_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
4105 break;
4106 case 2:
08307563 4107 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 4108 break;
99c475ab 4109 }
58ab8e96 4110 tcg_temp_free_i32(tmp);
99c475ab 4111 }
9ee6e8bb 4112 rd += stride;
1b2b1e54 4113 tcg_gen_addi_i32(addr, addr, 1 << size);
99c475ab 4114 }
e318a60b 4115 tcg_temp_free_i32(addr);
9ee6e8bb 4116 stride = nregs * (1 << size);
99c475ab 4117 }
9ee6e8bb
PB
4118 }
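    /* Base register writeback: rm == 15 means no writeback, rm == 13 (the
     * "!" form) post-increments rn by the number of bytes transferred, and
     * any other rm post-indexes rn by that register.
     */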
4119 if (rm != 15) {
39d5492a 4120 TCGv_i32 base;
b26eefb6
PB
4121
4122 base = load_reg(s, rn);
9ee6e8bb 4123 if (rm == 13) {
b26eefb6 4124 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 4125 } else {
39d5492a 4126 TCGv_i32 index;
b26eefb6
PB
4127 index = load_reg(s, rm);
4128 tcg_gen_add_i32(base, base, index);
7d1b0095 4129 tcg_temp_free_i32(index);
9ee6e8bb 4130 }
b26eefb6 4131 store_reg(s, rn, base);
9ee6e8bb
PB
4132 }
4133 return 0;
4134}
3b46e624 4135
8f8e3aa4 4136/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
39d5492a 4137static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
8f8e3aa4
PB
4138{
4139 tcg_gen_and_i32(t, t, c);
f669df27 4140 tcg_gen_andc_i32(f, f, c);
8f8e3aa4
PB
4141 tcg_gen_or_i32(dest, t, f);
4142}
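/* VBIT and VBIF are implemented below in terms of this same helper by
 * permuting which operand acts as the select mask.
 */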
4143
39d5492a 4144static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4145{
4146 switch (size) {
4147 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4148 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4149 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4150 default: abort();
4151 }
4152}
4153
39d5492a 4154static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4155{
4156 switch (size) {
02da0b2d
PM
4157 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4158 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4159 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
ad69471c
PB
4160 default: abort();
4161 }
4162}
4163
39d5492a 4164static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
ad69471c
PB
4165{
4166 switch (size) {
02da0b2d
PM
4167 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4168 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4169 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
ad69471c
PB
4170 default: abort();
4171 }
4172}
4173
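/* "unarrow" = narrow a signed 2N-bit value with unsigned saturation, as
 * used by the VQMOVUN/VQSHRUN family.
 */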
39d5492a 4174static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
af1bbf30
JR
4175{
4176 switch (size) {
02da0b2d
PM
4177 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4178 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4179 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
af1bbf30
JR
4180 default: abort();
4181 }
4182}
4183
39d5492a 4184static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
ad69471c
PB
4185 int q, int u)
4186{
4187 if (q) {
4188 if (u) {
4189 switch (size) {
4190 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4191 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4192 default: abort();
4193 }
4194 } else {
4195 switch (size) {
4196 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4197 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4198 default: abort();
4199 }
4200 }
4201 } else {
4202 if (u) {
4203 switch (size) {
b408a9b0
CL
4204 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4205 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
ad69471c
PB
4206 default: abort();
4207 }
4208 } else {
4209 switch (size) {
4210 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4211 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4212 default: abort();
4213 }
4214 }
4215 }
4216}
4217
39d5492a 4218static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
ad69471c
PB
4219{
4220 if (u) {
4221 switch (size) {
4222 case 0: gen_helper_neon_widen_u8(dest, src); break;
4223 case 1: gen_helper_neon_widen_u16(dest, src); break;
4224 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4225 default: abort();
4226 }
4227 } else {
4228 switch (size) {
4229 case 0: gen_helper_neon_widen_s8(dest, src); break;
4230 case 1: gen_helper_neon_widen_s16(dest, src); break;
4231 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4232 default: abort();
4233 }
4234 }
7d1b0095 4235 tcg_temp_free_i32(src);
ad69471c
PB
4236}
4237
4238static inline void gen_neon_addl(int size)
4239{
4240 switch (size) {
4241 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4242 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4243 case 2: tcg_gen_add_i64(CPU_V001); break;
4244 default: abort();
4245 }
4246}
4247
4248static inline void gen_neon_subl(int size)
4249{
4250 switch (size) {
4251 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4252 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4253 case 2: tcg_gen_sub_i64(CPU_V001); break;
4254 default: abort();
4255 }
4256}
4257
a7812ae4 4258static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4259{
4260 switch (size) {
4261 case 0: gen_helper_neon_negl_u16(var, var); break;
4262 case 1: gen_helper_neon_negl_u32(var, var); break;
ee6fa559
PM
4263 case 2:
4264 tcg_gen_neg_i64(var, var);
4265 break;
ad69471c
PB
4266 default: abort();
4267 }
4268}
4269
a7812ae4 4270static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4271{
4272 switch (size) {
02da0b2d
PM
4273 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4274 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
ad69471c
PB
4275 default: abort();
4276 }
4277}
4278
39d5492a
PM
4279static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4280 int size, int u)
ad69471c 4281{
a7812ae4 4282 TCGv_i64 tmp;
ad69471c
PB
4283
4284 switch ((size << 1) | u) {
4285 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4286 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4287 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4288 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4289 case 4:
4290 tmp = gen_muls_i64_i32(a, b);
4291 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4292 tcg_temp_free_i64(tmp);
ad69471c
PB
4293 break;
4294 case 5:
4295 tmp = gen_mulu_i64_i32(a, b);
4296 tcg_gen_mov_i64(dest, tmp);
7d2aabe2 4297 tcg_temp_free_i64(tmp);
ad69471c
PB
4298 break;
4299 default: abort();
4300 }
c6067f04
CL
4301
4302 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4303 Free them here. */
4304 if (size < 2) {
7d1b0095
PM
4305 tcg_temp_free_i32(a);
4306 tcg_temp_free_i32(b);
c6067f04 4307 }
ad69471c
PB
4308}
4309
39d5492a
PM
4310static void gen_neon_narrow_op(int op, int u, int size,
4311 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4312{
4313 if (op) {
4314 if (u) {
4315 gen_neon_unarrow_sats(size, dest, src);
4316 } else {
4317 gen_neon_narrow(size, dest, src);
4318 }
4319 } else {
4320 if (u) {
4321 gen_neon_narrow_satu(size, dest, src);
4322 } else {
4323 gen_neon_narrow_sats(size, dest, src);
4324 }
4325 }
4326}
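/* Dispatcher for the narrowing ops: 'op' and 'u' together select one of the
 * four narrow helpers above (plain, signed-saturating, unsigned-saturating,
 * or signed-to-unsigned saturating).
 */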
4327
62698be3
PM
4328/* Symbolic constants for op fields for Neon 3-register same-length.
4329 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4330 * table A7-9.
4331 */
4332#define NEON_3R_VHADD 0
4333#define NEON_3R_VQADD 1
4334#define NEON_3R_VRHADD 2
4335#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4336#define NEON_3R_VHSUB 4
4337#define NEON_3R_VQSUB 5
4338#define NEON_3R_VCGT 6
4339#define NEON_3R_VCGE 7
4340#define NEON_3R_VSHL 8
4341#define NEON_3R_VQSHL 9
4342#define NEON_3R_VRSHL 10
4343#define NEON_3R_VQRSHL 11
4344#define NEON_3R_VMAX 12
4345#define NEON_3R_VMIN 13
4346#define NEON_3R_VABD 14
4347#define NEON_3R_VABA 15
4348#define NEON_3R_VADD_VSUB 16
4349#define NEON_3R_VTST_VCEQ 17
4350#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4351#define NEON_3R_VMUL 19
4352#define NEON_3R_VPMAX 20
4353#define NEON_3R_VPMIN 21
4354#define NEON_3R_VQDMULH_VQRDMULH 22
4355#define NEON_3R_VPADD 23
da97f52c 4356#define NEON_3R_VFM 25 /* VFMA, VFMS: float fused multiply-add */
62698be3
PM
4357#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4358#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4359#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4360#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4361#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4362#define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4363
4364static const uint8_t neon_3r_sizes[] = {
4365 [NEON_3R_VHADD] = 0x7,
4366 [NEON_3R_VQADD] = 0xf,
4367 [NEON_3R_VRHADD] = 0x7,
4368 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4369 [NEON_3R_VHSUB] = 0x7,
4370 [NEON_3R_VQSUB] = 0xf,
4371 [NEON_3R_VCGT] = 0x7,
4372 [NEON_3R_VCGE] = 0x7,
4373 [NEON_3R_VSHL] = 0xf,
4374 [NEON_3R_VQSHL] = 0xf,
4375 [NEON_3R_VRSHL] = 0xf,
4376 [NEON_3R_VQRSHL] = 0xf,
4377 [NEON_3R_VMAX] = 0x7,
4378 [NEON_3R_VMIN] = 0x7,
4379 [NEON_3R_VABD] = 0x7,
4380 [NEON_3R_VABA] = 0x7,
4381 [NEON_3R_VADD_VSUB] = 0xf,
4382 [NEON_3R_VTST_VCEQ] = 0x7,
4383 [NEON_3R_VML] = 0x7,
4384 [NEON_3R_VMUL] = 0x7,
4385 [NEON_3R_VPMAX] = 0x7,
4386 [NEON_3R_VPMIN] = 0x7,
4387 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4388 [NEON_3R_VPADD] = 0x7,
da97f52c 4389 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
62698be3
PM
4390 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4391 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4392 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4393 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4394 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4395 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4396};
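/* As with neon_2rm_sizes below, bit n of each entry is set if element size n
 * is permitted; 0xf additionally allows the 64-bit element forms handled by
 * the size == 3 path.
 */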
4397
600b828c
PM
4398/* Symbolic constants for op fields for Neon 2-register miscellaneous.
4399 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4400 * table A7-13.
4401 */
4402#define NEON_2RM_VREV64 0
4403#define NEON_2RM_VREV32 1
4404#define NEON_2RM_VREV16 2
4405#define NEON_2RM_VPADDL 4
4406#define NEON_2RM_VPADDL_U 5
4407#define NEON_2RM_VCLS 8
4408#define NEON_2RM_VCLZ 9
4409#define NEON_2RM_VCNT 10
4410#define NEON_2RM_VMVN 11
4411#define NEON_2RM_VPADAL 12
4412#define NEON_2RM_VPADAL_U 13
4413#define NEON_2RM_VQABS 14
4414#define NEON_2RM_VQNEG 15
4415#define NEON_2RM_VCGT0 16
4416#define NEON_2RM_VCGE0 17
4417#define NEON_2RM_VCEQ0 18
4418#define NEON_2RM_VCLE0 19
4419#define NEON_2RM_VCLT0 20
4420#define NEON_2RM_VABS 22
4421#define NEON_2RM_VNEG 23
4422#define NEON_2RM_VCGT0_F 24
4423#define NEON_2RM_VCGE0_F 25
4424#define NEON_2RM_VCEQ0_F 26
4425#define NEON_2RM_VCLE0_F 27
4426#define NEON_2RM_VCLT0_F 28
4427#define NEON_2RM_VABS_F 30
4428#define NEON_2RM_VNEG_F 31
4429#define NEON_2RM_VSWP 32
4430#define NEON_2RM_VTRN 33
4431#define NEON_2RM_VUZP 34
4432#define NEON_2RM_VZIP 35
4433#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4434#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4435#define NEON_2RM_VSHLL 38
4436#define NEON_2RM_VCVT_F16_F32 44
4437#define NEON_2RM_VCVT_F32_F16 46
4438#define NEON_2RM_VRECPE 56
4439#define NEON_2RM_VRSQRTE 57
4440#define NEON_2RM_VRECPE_F 58
4441#define NEON_2RM_VRSQRTE_F 59
4442#define NEON_2RM_VCVT_FS 60
4443#define NEON_2RM_VCVT_FU 61
4444#define NEON_2RM_VCVT_SF 62
4445#define NEON_2RM_VCVT_UF 63
4446
4447static int neon_2rm_is_float_op(int op)
4448{
4449 /* Return true if this neon 2reg-misc op is float-to-float */
4450 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4451 op >= NEON_2RM_VRECPE_F);
4452}
4453
4454/* Each entry in this array has bit n set if the insn allows
4455 * size value n (otherwise it will UNDEF). Since unallocated
4456 * op values will have no bits set, they always UNDEF.
4457 */
4458static const uint8_t neon_2rm_sizes[] = {
4459 [NEON_2RM_VREV64] = 0x7,
4460 [NEON_2RM_VREV32] = 0x3,
4461 [NEON_2RM_VREV16] = 0x1,
4462 [NEON_2RM_VPADDL] = 0x7,
4463 [NEON_2RM_VPADDL_U] = 0x7,
4464 [NEON_2RM_VCLS] = 0x7,
4465 [NEON_2RM_VCLZ] = 0x7,
4466 [NEON_2RM_VCNT] = 0x1,
4467 [NEON_2RM_VMVN] = 0x1,
4468 [NEON_2RM_VPADAL] = 0x7,
4469 [NEON_2RM_VPADAL_U] = 0x7,
4470 [NEON_2RM_VQABS] = 0x7,
4471 [NEON_2RM_VQNEG] = 0x7,
4472 [NEON_2RM_VCGT0] = 0x7,
4473 [NEON_2RM_VCGE0] = 0x7,
4474 [NEON_2RM_VCEQ0] = 0x7,
4475 [NEON_2RM_VCLE0] = 0x7,
4476 [NEON_2RM_VCLT0] = 0x7,
4477 [NEON_2RM_VABS] = 0x7,
4478 [NEON_2RM_VNEG] = 0x7,
4479 [NEON_2RM_VCGT0_F] = 0x4,
4480 [NEON_2RM_VCGE0_F] = 0x4,
4481 [NEON_2RM_VCEQ0_F] = 0x4,
4482 [NEON_2RM_VCLE0_F] = 0x4,
4483 [NEON_2RM_VCLT0_F] = 0x4,
4484 [NEON_2RM_VABS_F] = 0x4,
4485 [NEON_2RM_VNEG_F] = 0x4,
4486 [NEON_2RM_VSWP] = 0x1,
4487 [NEON_2RM_VTRN] = 0x7,
4488 [NEON_2RM_VUZP] = 0x7,
4489 [NEON_2RM_VZIP] = 0x7,
4490 [NEON_2RM_VMOVN] = 0x7,
4491 [NEON_2RM_VQMOVN] = 0x7,
4492 [NEON_2RM_VSHLL] = 0x7,
4493 [NEON_2RM_VCVT_F16_F32] = 0x2,
4494 [NEON_2RM_VCVT_F32_F16] = 0x2,
4495 [NEON_2RM_VRECPE] = 0x4,
4496 [NEON_2RM_VRSQRTE] = 0x4,
4497 [NEON_2RM_VRECPE_F] = 0x4,
4498 [NEON_2RM_VRSQRTE_F] = 0x4,
4499 [NEON_2RM_VCVT_FS] = 0x4,
4500 [NEON_2RM_VCVT_FU] = 0x4,
4501 [NEON_2RM_VCVT_SF] = 0x4,
4502 [NEON_2RM_VCVT_UF] = 0x4,
4503};
4504
9ee6e8bb
PB
4505/* Translate a NEON data processing instruction. Return nonzero if the
4506 instruction is invalid.
ad69471c
PB
4507 We process data in a mixture of 32-bit and 64-bit chunks.
4508 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4509
0ecb72a5 4510static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
9ee6e8bb
PB
4511{
4512 int op;
4513 int q;
4514 int rd, rn, rm;
4515 int size;
4516 int shift;
4517 int pass;
4518 int count;
4519 int pairwise;
4520 int u;
ca9a32e4 4521 uint32_t imm, mask;
39d5492a 4522 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 4523 TCGv_i64 tmp64;
9ee6e8bb 4524
5df8bac1 4525 if (!s->vfp_enabled)
9ee6e8bb
PB
4526 return 1;
4527 q = (insn & (1 << 6)) != 0;
4528 u = (insn >> 24) & 1;
4529 VFP_DREG_D(rd, insn);
4530 VFP_DREG_N(rn, insn);
4531 VFP_DREG_M(rm, insn);
4532 size = (insn >> 20) & 3;
4533 if ((insn & (1 << 23)) == 0) {
4534 /* Three register same length. */
4535 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
4536 /* Catch invalid op and bad size combinations: UNDEF */
4537 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4538 return 1;
4539 }
25f84f79
PM
4540 /* All insns of this form UNDEF for either this condition or the
4541 * superset of cases "Q==1"; we catch the latter later.
4542 */
4543 if (q && ((rd | rn | rm) & 1)) {
4544 return 1;
4545 }
62698be3
PM
4546 if (size == 3 && op != NEON_3R_LOGIC) {
4547 /* 64-bit element instructions. */
9ee6e8bb 4548 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4549 neon_load_reg64(cpu_V0, rn + pass);
4550 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4551 switch (op) {
62698be3 4552 case NEON_3R_VQADD:
9ee6e8bb 4553 if (u) {
02da0b2d
PM
4554 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4555 cpu_V0, cpu_V1);
2c0262af 4556 } else {
02da0b2d
PM
4557 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4558 cpu_V0, cpu_V1);
2c0262af 4559 }
9ee6e8bb 4560 break;
62698be3 4561 case NEON_3R_VQSUB:
9ee6e8bb 4562 if (u) {
02da0b2d
PM
4563 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4564 cpu_V0, cpu_V1);
ad69471c 4565 } else {
02da0b2d
PM
4566 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4567 cpu_V0, cpu_V1);
ad69471c
PB
4568 }
4569 break;
62698be3 4570 case NEON_3R_VSHL:
ad69471c
PB
4571 if (u) {
4572 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4573 } else {
4574 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4575 }
4576 break;
62698be3 4577 case NEON_3R_VQSHL:
ad69471c 4578 if (u) {
02da0b2d
PM
4579 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4580 cpu_V1, cpu_V0);
ad69471c 4581 } else {
02da0b2d
PM
4582 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4583 cpu_V1, cpu_V0);
ad69471c
PB
4584 }
4585 break;
62698be3 4586 case NEON_3R_VRSHL:
ad69471c
PB
4587 if (u) {
4588 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4589 } else {
ad69471c
PB
4590 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4591 }
4592 break;
62698be3 4593 case NEON_3R_VQRSHL:
ad69471c 4594 if (u) {
02da0b2d
PM
4595 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4596 cpu_V1, cpu_V0);
ad69471c 4597 } else {
02da0b2d
PM
4598 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4599 cpu_V1, cpu_V0);
1e8d4eec 4600 }
9ee6e8bb 4601 break;
62698be3 4602 case NEON_3R_VADD_VSUB:
9ee6e8bb 4603 if (u) {
ad69471c 4604 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4605 } else {
ad69471c 4606 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4607 }
4608 break;
4609 default:
4610 abort();
2c0262af 4611 }
ad69471c 4612 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4613 }
9ee6e8bb 4614 return 0;
2c0262af 4615 }
25f84f79 4616 pairwise = 0;
9ee6e8bb 4617 switch (op) {
62698be3
PM
4618 case NEON_3R_VSHL:
4619 case NEON_3R_VQSHL:
4620 case NEON_3R_VRSHL:
4621 case NEON_3R_VQRSHL:
9ee6e8bb 4622 {
ad69471c
PB
4623 int rtmp;
4624 /* Shift instruction operands are reversed. */
4625 rtmp = rn;
9ee6e8bb 4626 rn = rm;
ad69471c 4627 rm = rtmp;
9ee6e8bb 4628 }
2c0262af 4629 break;
25f84f79
PM
4630 case NEON_3R_VPADD:
4631 if (u) {
4632 return 1;
4633 }
4634 /* Fall through */
62698be3
PM
4635 case NEON_3R_VPMAX:
4636 case NEON_3R_VPMIN:
9ee6e8bb 4637 pairwise = 1;
2c0262af 4638 break;
25f84f79
PM
4639 case NEON_3R_FLOAT_ARITH:
4640 pairwise = (u && size < 2); /* if VPADD (float) */
4641 break;
4642 case NEON_3R_FLOAT_MINMAX:
4643 pairwise = u; /* if VPMIN/VPMAX (float) */
4644 break;
4645 case NEON_3R_FLOAT_CMP:
4646 if (!u && size) {
4647 /* no encoding for U=0 C=1x */
4648 return 1;
4649 }
4650 break;
4651 case NEON_3R_FLOAT_ACMP:
4652 if (!u) {
4653 return 1;
4654 }
4655 break;
4656 case NEON_3R_VRECPS_VRSQRTS:
4657 if (u) {
4658 return 1;
4659 }
2c0262af 4660 break;
25f84f79
PM
4661 case NEON_3R_VMUL:
4662 if (u && (size != 0)) {
4663 /* UNDEF on invalid size for polynomial subcase */
4664 return 1;
4665 }
2c0262af 4666 break;
da97f52c
PM
4667 case NEON_3R_VFM:
4668 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4669 return 1;
4670 }
4671 break;
9ee6e8bb 4672 default:
2c0262af 4673 break;
9ee6e8bb 4674 }
dd8fbd78 4675
25f84f79
PM
4676 if (pairwise && q) {
4677 /* All the pairwise insns UNDEF if Q is set */
4678 return 1;
4679 }
4680
9ee6e8bb
PB
4681 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4682
4683 if (pairwise) {
4684 /* Pairwise. */
a5a14945
JR
4685 if (pass < 1) {
4686 tmp = neon_load_reg(rn, 0);
4687 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 4688 } else {
a5a14945
JR
4689 tmp = neon_load_reg(rm, 0);
4690 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
4691 }
4692 } else {
4693 /* Elementwise. */
dd8fbd78
FN
4694 tmp = neon_load_reg(rn, pass);
4695 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
4696 }
4697 switch (op) {
62698be3 4698 case NEON_3R_VHADD:
9ee6e8bb
PB
4699 GEN_NEON_INTEGER_OP(hadd);
4700 break;
62698be3 4701 case NEON_3R_VQADD:
02da0b2d 4702 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4703 break;
62698be3 4704 case NEON_3R_VRHADD:
9ee6e8bb 4705 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4706 break;
62698be3 4707 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
4708 switch ((u << 2) | size) {
4709 case 0: /* VAND */
dd8fbd78 4710 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4711 break;
4712 case 1: /* BIC */
f669df27 4713 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4714 break;
4715 case 2: /* VORR */
dd8fbd78 4716 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4717 break;
4718 case 3: /* VORN */
f669df27 4719 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4720 break;
4721 case 4: /* VEOR */
dd8fbd78 4722 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
4723 break;
4724 case 5: /* VBSL */
dd8fbd78
FN
4725 tmp3 = neon_load_reg(rd, pass);
4726 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 4727 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4728 break;
4729 case 6: /* VBIT */
dd8fbd78
FN
4730 tmp3 = neon_load_reg(rd, pass);
4731 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 4732 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
4733 break;
4734 case 7: /* VBIF */
dd8fbd78
FN
4735 tmp3 = neon_load_reg(rd, pass);
4736 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 4737 tcg_temp_free_i32(tmp3);
9ee6e8bb 4738 break;
2c0262af
FB
4739 }
4740 break;
62698be3 4741 case NEON_3R_VHSUB:
9ee6e8bb
PB
4742 GEN_NEON_INTEGER_OP(hsub);
4743 break;
62698be3 4744 case NEON_3R_VQSUB:
02da0b2d 4745 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4746 break;
62698be3 4747 case NEON_3R_VCGT:
9ee6e8bb
PB
4748 GEN_NEON_INTEGER_OP(cgt);
4749 break;
62698be3 4750 case NEON_3R_VCGE:
9ee6e8bb
PB
4751 GEN_NEON_INTEGER_OP(cge);
4752 break;
62698be3 4753 case NEON_3R_VSHL:
ad69471c 4754 GEN_NEON_INTEGER_OP(shl);
2c0262af 4755 break;
62698be3 4756 case NEON_3R_VQSHL:
02da0b2d 4757 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4758 break;
62698be3 4759 case NEON_3R_VRSHL:
ad69471c 4760 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4761 break;
62698be3 4762 case NEON_3R_VQRSHL:
02da0b2d 4763 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 4764 break;
62698be3 4765 case NEON_3R_VMAX:
9ee6e8bb
PB
4766 GEN_NEON_INTEGER_OP(max);
4767 break;
62698be3 4768 case NEON_3R_VMIN:
9ee6e8bb
PB
4769 GEN_NEON_INTEGER_OP(min);
4770 break;
62698be3 4771 case NEON_3R_VABD:
9ee6e8bb
PB
4772 GEN_NEON_INTEGER_OP(abd);
4773 break;
62698be3 4774 case NEON_3R_VABA:
9ee6e8bb 4775 GEN_NEON_INTEGER_OP(abd);
7d1b0095 4776 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
4777 tmp2 = neon_load_reg(rd, pass);
4778 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 4779 break;
62698be3 4780 case NEON_3R_VADD_VSUB:
9ee6e8bb 4781 if (!u) { /* VADD */
62698be3 4782 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4783 } else { /* VSUB */
4784 switch (size) {
dd8fbd78
FN
4785 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4786 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4787 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 4788 default: abort();
9ee6e8bb
PB
4789 }
4790 }
4791 break;
62698be3 4792 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
4793 if (!u) { /* VTST */
4794 switch (size) {
dd8fbd78
FN
4795 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4796 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4797 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 4798 default: abort();
9ee6e8bb
PB
4799 }
4800 } else { /* VCEQ */
4801 switch (size) {
dd8fbd78
FN
4802 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4803 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4804 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 4805 default: abort();
9ee6e8bb
PB
4806 }
4807 }
4808 break;
62698be3 4809 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
9ee6e8bb 4810 switch (size) {
dd8fbd78
FN
4811 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4812 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4813 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4814 default: abort();
9ee6e8bb 4815 }
7d1b0095 4816 tcg_temp_free_i32(tmp2);
dd8fbd78 4817 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4818 if (u) { /* VMLS */
dd8fbd78 4819 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 4820 } else { /* VMLA */
dd8fbd78 4821 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
4822 }
4823 break;
62698be3 4824 case NEON_3R_VMUL:
9ee6e8bb 4825 if (u) { /* polynomial */
dd8fbd78 4826 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
4827 } else { /* Integer */
4828 switch (size) {
dd8fbd78
FN
4829 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4830 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4831 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 4832 default: abort();
9ee6e8bb
PB
4833 }
4834 }
4835 break;
62698be3 4836 case NEON_3R_VPMAX:
9ee6e8bb
PB
4837 GEN_NEON_INTEGER_OP(pmax);
4838 break;
62698be3 4839 case NEON_3R_VPMIN:
9ee6e8bb
PB
4840 GEN_NEON_INTEGER_OP(pmin);
4841 break;
62698be3 4842 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
4843 if (!u) { /* VQDMULH */
4844 switch (size) {
02da0b2d
PM
4845 case 1:
4846 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4847 break;
4848 case 2:
4849 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4850 break;
62698be3 4851 default: abort();
9ee6e8bb 4852 }
62698be3 4853 } else { /* VQRDMULH */
9ee6e8bb 4854 switch (size) {
02da0b2d
PM
4855 case 1:
4856 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4857 break;
4858 case 2:
4859 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4860 break;
62698be3 4861 default: abort();
9ee6e8bb
PB
4862 }
4863 }
4864 break;
62698be3 4865 case NEON_3R_VPADD:
9ee6e8bb 4866 switch (size) {
dd8fbd78
FN
4867 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4868 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4869 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 4870 default: abort();
9ee6e8bb
PB
4871 }
4872 break;
62698be3 4873 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
4874 {
4875 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
4876 switch ((u << 2) | size) {
4877 case 0: /* VADD */
aa47cfdd
PM
4878 case 4: /* VPADD */
4879 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4880 break;
4881 case 2: /* VSUB */
aa47cfdd 4882 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4883 break;
4884 case 6: /* VABD */
aa47cfdd 4885 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
4886 break;
4887 default:
62698be3 4888 abort();
9ee6e8bb 4889 }
aa47cfdd 4890 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4891 break;
aa47cfdd 4892 }
62698be3 4893 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
4894 {
4895 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4896 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4897 if (!u) {
7d1b0095 4898 tcg_temp_free_i32(tmp2);
dd8fbd78 4899 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 4900 if (size == 0) {
aa47cfdd 4901 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 4902 } else {
aa47cfdd 4903 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
4904 }
4905 }
aa47cfdd 4906 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4907 break;
aa47cfdd 4908 }
62698be3 4909 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
4910 {
4911 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 4912 if (!u) {
aa47cfdd 4913 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 4914 } else {
aa47cfdd
PM
4915 if (size == 0) {
4916 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4917 } else {
4918 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4919 }
b5ff1b31 4920 }
aa47cfdd 4921 tcg_temp_free_ptr(fpstatus);
2c0262af 4922 break;
aa47cfdd 4923 }
62698be3 4924 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
4925 {
4926 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4927 if (size == 0) {
4928 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4929 } else {
4930 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4931 }
4932 tcg_temp_free_ptr(fpstatus);
2c0262af 4933 break;
aa47cfdd 4934 }
62698be3 4935 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
4936 {
4937 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4938 if (size == 0) {
4939 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4940 } else {
4941 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4942 }
4943 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 4944 break;
aa47cfdd 4945 }
62698be3 4946 case NEON_3R_VRECPS_VRSQRTS:
9ee6e8bb 4947 if (size == 0)
dd8fbd78 4948 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
9ee6e8bb 4949 else
dd8fbd78 4950 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
2c0262af 4951 break;
da97f52c
PM
4952 case NEON_3R_VFM:
4953 {
4954 /* VFMA, VFMS: fused multiply-add */
4955 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4956 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4957 if (size) {
4958 /* VFMS */
4959 gen_helper_vfp_negs(tmp, tmp);
4960 }
4961 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4962 tcg_temp_free_i32(tmp3);
4963 tcg_temp_free_ptr(fpstatus);
4964 break;
4965 }
9ee6e8bb
PB
4966 default:
4967 abort();
2c0262af 4968 }
7d1b0095 4969 tcg_temp_free_i32(tmp2);
dd8fbd78 4970
9ee6e8bb
PB
4971 /* Save the result. For elementwise operations we can put it
4972 straight into the destination register. For pairwise operations
4973 we have to be careful to avoid clobbering the source operands. */
4974 if (pairwise && rd == rm) {
dd8fbd78 4975 neon_store_scratch(pass, tmp);
9ee6e8bb 4976 } else {
dd8fbd78 4977 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4978 }
4979
4980 } /* for pass */
4981 if (pairwise && rd == rm) {
4982 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
4983 tmp = neon_load_scratch(pass);
4984 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4985 }
4986 }
ad69471c 4987 /* End of 3 register same size operations. */
9ee6e8bb
PB
4988 } else if (insn & (1 << 4)) {
4989 if ((insn & 0x00380080) != 0) {
4990 /* Two registers and shift. */
4991 op = (insn >> 8) & 0xf;
4992 if (insn & (1 << 7)) {
cc13115b
PM
4993 /* 64-bit shift. */
4994 if (op > 7) {
4995 return 1;
4996 }
9ee6e8bb
PB
4997 size = 3;
4998 } else {
4999 size = 2;
5000 while ((insn & (1 << (size + 19))) == 0)
5001 size--;
5002 }
5003 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5004 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5005 by immediate using the variable shift operations. */
5006 if (op < 8) {
5007 /* Shift by immediate:
5008 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5009 if (q && ((rd | rm) & 1)) {
5010 return 1;
5011 }
5012 if (!u && (op == 4 || op == 6)) {
5013 return 1;
5014 }
9ee6e8bb
PB
5015 /* Right shifts are encoded as N - shift, where N is the
5016 element size in bits. */
5017 if (op <= 4)
5018 shift = shift - (1 << (size + 3));
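                /* Worked example: VSHR.S16 #3 has 16 - 3 = 13 in the low bits
                 * of imm6, so with size == 1 the subtraction above gives -3
                 * and the variable left-shift helpers below perform a right
                 * shift by 3.
                 */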
9ee6e8bb
PB
5019 if (size == 3) {
5020 count = q + 1;
5021 } else {
5022 count = q ? 4: 2;
5023 }
5024 switch (size) {
5025 case 0:
5026 imm = (uint8_t) shift;
5027 imm |= imm << 8;
5028 imm |= imm << 16;
5029 break;
5030 case 1:
5031 imm = (uint16_t) shift;
5032 imm |= imm << 16;
5033 break;
5034 case 2:
5035 case 3:
5036 imm = shift;
5037 break;
5038 default:
5039 abort();
5040 }
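                /* The (possibly negative) per-element shift count has now been
                 * replicated into every element of 'imm', e.g. for size == 0
                 * the same byte appears in all four byte lanes, so one 32-bit
                 * variable-shift helper call handles a whole word of elements.
                 */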
5041
5042 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5043 if (size == 3) {
5044 neon_load_reg64(cpu_V0, rm + pass);
5045 tcg_gen_movi_i64(cpu_V1, imm);
5046 switch (op) {
5047 case 0: /* VSHR */
5048 case 1: /* VSRA */
5049 if (u)
5050 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5051 else
ad69471c 5052 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5053 break;
ad69471c
PB
5054 case 2: /* VRSHR */
5055 case 3: /* VRSRA */
5056 if (u)
5057 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5058 else
ad69471c 5059 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5060 break;
ad69471c 5061 case 4: /* VSRI */
ad69471c
PB
5062 case 5: /* VSHL, VSLI */
5063 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5064 break;
0322b26e 5065 case 6: /* VQSHLU */
02da0b2d
PM
5066 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5067 cpu_V0, cpu_V1);
ad69471c 5068 break;
0322b26e
PM
5069 case 7: /* VQSHL */
5070 if (u) {
02da0b2d 5071 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5072 cpu_V0, cpu_V1);
5073 } else {
02da0b2d 5074 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5075 cpu_V0, cpu_V1);
5076 }
9ee6e8bb 5077 break;
9ee6e8bb 5078 }
ad69471c
PB
5079 if (op == 1 || op == 3) {
5080 /* Accumulate. */
5371cb81 5081 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5082 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5083 } else if (op == 4 || (op == 5 && u)) {
5084 /* Insert */
923e6509
CL
5085 neon_load_reg64(cpu_V1, rd + pass);
5086 uint64_t mask;
5087 if (shift < -63 || shift > 63) {
5088 mask = 0;
5089 } else {
5090 if (op == 4) {
5091 mask = 0xffffffffffffffffull >> -shift;
5092 } else {
5093 mask = 0xffffffffffffffffull << shift;
5094 }
5095 }
5096 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5097 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5098 }
5099 neon_store_reg64(cpu_V0, rd + pass);
5100 } else { /* size < 3 */
5101 /* Operands in T0 and T1. */
dd8fbd78 5102 tmp = neon_load_reg(rm, pass);
7d1b0095 5103 tmp2 = tcg_temp_new_i32();
dd8fbd78 5104 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5105 switch (op) {
5106 case 0: /* VSHR */
5107 case 1: /* VSRA */
5108 GEN_NEON_INTEGER_OP(shl);
5109 break;
5110 case 2: /* VRSHR */
5111 case 3: /* VRSRA */
5112 GEN_NEON_INTEGER_OP(rshl);
5113 break;
5114 case 4: /* VSRI */
ad69471c
PB
5115 case 5: /* VSHL, VSLI */
5116 switch (size) {
dd8fbd78
FN
5117 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5118 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5119 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5120 default: abort();
ad69471c
PB
5121 }
5122 break;
0322b26e 5123 case 6: /* VQSHLU */
ad69471c 5124 switch (size) {
0322b26e 5125 case 0:
02da0b2d
PM
5126 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5127 tmp, tmp2);
0322b26e
PM
5128 break;
5129 case 1:
02da0b2d
PM
5130 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5131 tmp, tmp2);
0322b26e
PM
5132 break;
5133 case 2:
02da0b2d
PM
5134 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5135 tmp, tmp2);
0322b26e
PM
5136 break;
5137 default:
cc13115b 5138 abort();
ad69471c
PB
5139 }
5140 break;
0322b26e 5141 case 7: /* VQSHL */
02da0b2d 5142 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5143 break;
ad69471c 5144 }
7d1b0095 5145 tcg_temp_free_i32(tmp2);
ad69471c
PB
5146
5147 if (op == 1 || op == 3) {
5148 /* Accumulate. */
dd8fbd78 5149 tmp2 = neon_load_reg(rd, pass);
5371cb81 5150 gen_neon_add(size, tmp, tmp2);
7d1b0095 5151 tcg_temp_free_i32(tmp2);
ad69471c
PB
5152 } else if (op == 4 || (op == 5 && u)) {
5153 /* Insert */
5154 switch (size) {
5155 case 0:
5156 if (op == 4)
ca9a32e4 5157 mask = 0xff >> -shift;
ad69471c 5158 else
ca9a32e4
JR
5159 mask = (uint8_t)(0xff << shift);
5160 mask |= mask << 8;
5161 mask |= mask << 16;
ad69471c
PB
5162 break;
5163 case 1:
5164 if (op == 4)
ca9a32e4 5165 mask = 0xffff >> -shift;
ad69471c 5166 else
ca9a32e4
JR
5167 mask = (uint16_t)(0xffff << shift);
5168 mask |= mask << 16;
ad69471c
PB
5169 break;
5170 case 2:
ca9a32e4
JR
5171 if (shift < -31 || shift > 31) {
5172 mask = 0;
5173 } else {
5174 if (op == 4)
5175 mask = 0xffffffffu >> -shift;
5176 else
5177 mask = 0xffffffffu << shift;
5178 }
ad69471c
PB
5179 break;
5180 default:
5181 abort();
5182 }
dd8fbd78 5183 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5184 tcg_gen_andi_i32(tmp, tmp, mask);
5185 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5186 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5187 tcg_temp_free_i32(tmp2);
ad69471c 5188 }
dd8fbd78 5189 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5190 }
5191 } /* for pass */
5192 } else if (op < 10) {
ad69471c 5193 /* Shift by immediate and narrow:
9ee6e8bb 5194 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5195 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5196 if (rm & 1) {
5197 return 1;
5198 }
9ee6e8bb
PB
5199 shift = shift - (1 << (size + 3));
5200 size++;
92cdfaeb 5201 if (size == 3) {
a7812ae4 5202 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5203 neon_load_reg64(cpu_V0, rm);
5204 neon_load_reg64(cpu_V1, rm + 1);
5205 for (pass = 0; pass < 2; pass++) {
5206 TCGv_i64 in;
5207 if (pass == 0) {
5208 in = cpu_V0;
5209 } else {
5210 in = cpu_V1;
5211 }
ad69471c 5212 if (q) {
0b36f4cd 5213 if (input_unsigned) {
92cdfaeb 5214 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5215 } else {
92cdfaeb 5216 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5217 }
ad69471c 5218 } else {
0b36f4cd 5219 if (input_unsigned) {
92cdfaeb 5220 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5221 } else {
92cdfaeb 5222 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5223 }
ad69471c 5224 }
7d1b0095 5225 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5226 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5227 neon_store_reg(rd, pass, tmp);
5228 } /* for pass */
5229 tcg_temp_free_i64(tmp64);
5230 } else {
5231 if (size == 1) {
5232 imm = (uint16_t)shift;
5233 imm |= imm << 16;
2c0262af 5234 } else {
92cdfaeb
PM
5235 /* size == 2 */
5236 imm = (uint32_t)shift;
5237 }
5238 tmp2 = tcg_const_i32(imm);
5239 tmp4 = neon_load_reg(rm + 1, 0);
5240 tmp5 = neon_load_reg(rm + 1, 1);
5241 for (pass = 0; pass < 2; pass++) {
5242 if (pass == 0) {
5243 tmp = neon_load_reg(rm, 0);
5244 } else {
5245 tmp = tmp4;
5246 }
0b36f4cd
CL
5247 gen_neon_shift_narrow(size, tmp, tmp2, q,
5248 input_unsigned);
92cdfaeb
PM
5249 if (pass == 0) {
5250 tmp3 = neon_load_reg(rm, 1);
5251 } else {
5252 tmp3 = tmp5;
5253 }
0b36f4cd
CL
5254 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5255 input_unsigned);
36aa55dc 5256 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5257 tcg_temp_free_i32(tmp);
5258 tcg_temp_free_i32(tmp3);
5259 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5260 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5261 neon_store_reg(rd, pass, tmp);
5262 } /* for pass */
c6067f04 5263 tcg_temp_free_i32(tmp2);
b75263d6 5264 }
9ee6e8bb 5265 } else if (op == 10) {
cc13115b
PM
5266 /* VSHLL, VMOVL */
5267 if (q || (rd & 1)) {
9ee6e8bb 5268 return 1;
cc13115b 5269 }
ad69471c
PB
5270 tmp = neon_load_reg(rm, 0);
5271 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5272 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5273 if (pass == 1)
5274 tmp = tmp2;
5275
5276 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5277
9ee6e8bb
PB
5278 if (shift != 0) {
5279 /* The shift is less than the width of the source
ad69471c
PB
5280 type, so we can just shift the whole register. */
5281 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5282 /* Widen the result of the shift: we need to clear
5283 * the potential overflow bits resulting from
5284 * left bits of the narrow input appearing as
5285 * right bits of the left neighbour narrow
5286 * input. */
ad69471c
PB
5287 if (size < 2 || !u) {
5288 uint64_t imm64;
5289 if (size == 0) {
5290 imm = (0xffu >> (8 - shift));
5291 imm |= imm << 16;
acdf01ef 5292 } else if (size == 1) {
ad69471c 5293 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5294 } else {
5295 /* size == 2 */
5296 imm = 0xffffffff >> (32 - shift);
5297 }
5298 if (size < 2) {
5299 imm64 = imm | (((uint64_t)imm) << 32);
5300 } else {
5301 imm64 = imm;
9ee6e8bb 5302 }
acdf01ef 5303 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5304 }
5305 }
ad69471c 5306 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5307 }
f73534a5 5308 } else if (op >= 14) {
9ee6e8bb 5309 /* VCVT fixed-point. */
cc13115b
PM
5310 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5311 return 1;
5312 }
f73534a5
PM
5313 /* We have already masked out the must-be-1 top bit of imm6,
5314 * hence this 32-shift where the ARM ARM has 64-imm6.
5315 */
5316 shift = 32 - shift;
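            /* e.g. VCVT with 8 fraction bits encodes imm6 = 64 - 8 = 56; after
             * the must-be-1 top bit is masked off, shift arrives here as 24,
             * and 32 - 24 recovers the 8 fractional bits passed to the helpers.
             */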
9ee6e8bb 5317 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5318 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5319 if (!(op & 1)) {
9ee6e8bb 5320 if (u)
5500b06c 5321 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5322 else
5500b06c 5323 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5324 } else {
5325 if (u)
5500b06c 5326 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5327 else
5500b06c 5328 gen_vfp_tosl(0, shift, 1);
2c0262af 5329 }
4373f3ce 5330 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5331 }
5332 } else {
9ee6e8bb
PB
5333 return 1;
5334 }
5335 } else { /* (insn & 0x00380080) == 0 */
5336 int invert;
7d80fee5
PM
5337 if (q && (rd & 1)) {
5338 return 1;
5339 }
9ee6e8bb
PB
5340
5341 op = (insn >> 8) & 0xf;
5342 /* One register and immediate. */
5343 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5344 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5345 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5346 * We choose not to special-case this and will behave as if a
5347 * valid constant encoding of 0 had been given.
5348 */
9ee6e8bb
PB
5349 switch (op) {
5350 case 0: case 1:
5351 /* no-op */
5352 break;
5353 case 2: case 3:
5354 imm <<= 8;
5355 break;
5356 case 4: case 5:
5357 imm <<= 16;
5358 break;
5359 case 6: case 7:
5360 imm <<= 24;
5361 break;
5362 case 8: case 9:
5363 imm |= imm << 16;
5364 break;
5365 case 10: case 11:
5366 imm = (imm << 8) | (imm << 24);
5367 break;
5368 case 12:
8e31209e 5369 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
5370 break;
5371 case 13:
5372 imm = (imm << 16) | 0xffff;
5373 break;
5374 case 14:
5375 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5376 if (invert)
5377 imm = ~imm;
5378 break;
5379 case 15:
7d80fee5
PM
5380 if (invert) {
5381 return 1;
5382 }
9ee6e8bb
PB
5383 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5384 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5385 break;
5386 }
5387 if (invert)
5388 imm = ~imm;
5389
9ee6e8bb
PB
5390 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5391 if (op & 1 && op < 12) {
ad69471c 5392 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
5393 if (invert) {
5394 /* The immediate value has already been inverted, so
5395 BIC becomes AND. */
ad69471c 5396 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 5397 } else {
ad69471c 5398 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 5399 }
9ee6e8bb 5400 } else {
ad69471c 5401 /* VMOV, VMVN. */
7d1b0095 5402 tmp = tcg_temp_new_i32();
9ee6e8bb 5403 if (op == 14 && invert) {
a5a14945 5404 int n;
ad69471c
PB
5405 uint32_t val;
5406 val = 0;
9ee6e8bb
PB
5407 for (n = 0; n < 4; n++) {
5408 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 5409 val |= 0xff << (n * 8);
9ee6e8bb 5410 }
ad69471c
PB
5411 tcg_gen_movi_i32(tmp, val);
5412 } else {
5413 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 5414 }
9ee6e8bb 5415 }
ad69471c 5416 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5417 }
5418 }
e4b3861d 5419 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
5420 if (size != 3) {
5421 op = (insn >> 8) & 0xf;
5422 if ((insn & (1 << 6)) == 0) {
5423 /* Three registers of different lengths. */
5424 int src1_wide;
5425 int src2_wide;
5426 int prewiden;
695272dc
PM
5427 /* undefreq: bit 0 : UNDEF if size != 0
5428 * bit 1 : UNDEF if size == 0
5429 * bit 2 : UNDEF if U == 1
5430 * Note that [1:0] set implies 'always UNDEF'
5431 */
5432 int undefreq;
5433 /* prewiden, src1_wide, src2_wide, undefreq */
5434 static const int neon_3reg_wide[16][4] = {
5435 {1, 0, 0, 0}, /* VADDL */
5436 {1, 1, 0, 0}, /* VADDW */
5437 {1, 0, 0, 0}, /* VSUBL */
5438 {1, 1, 0, 0}, /* VSUBW */
5439 {0, 1, 1, 0}, /* VADDHN */
5440 {0, 0, 0, 0}, /* VABAL */
5441 {0, 1, 1, 0}, /* VSUBHN */
5442 {0, 0, 0, 0}, /* VABDL */
5443 {0, 0, 0, 0}, /* VMLAL */
5444 {0, 0, 0, 6}, /* VQDMLAL */
5445 {0, 0, 0, 0}, /* VMLSL */
5446 {0, 0, 0, 6}, /* VQDMLSL */
5447 {0, 0, 0, 0}, /* Integer VMULL */
5448 {0, 0, 0, 2}, /* VQDMULL */
5449 {0, 0, 0, 5}, /* Polynomial VMULL */
5450 {0, 0, 0, 3}, /* Reserved: always UNDEF */
9ee6e8bb
PB
5451 };
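                /* e.g. VQDMULL (row 13) has undefreq == 2, i.e. it UNDEFs for
                 * size == 0, matching the absence of a byte-element VQDMULL.
                 */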
5452
5453 prewiden = neon_3reg_wide[op][0];
5454 src1_wide = neon_3reg_wide[op][1];
5455 src2_wide = neon_3reg_wide[op][2];
695272dc 5456 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 5457
695272dc
PM
5458 if (((undefreq & 1) && (size != 0)) ||
5459 ((undefreq & 2) && (size == 0)) ||
5460 ((undefreq & 4) && u)) {
5461 return 1;
5462 }
5463 if ((src1_wide && (rn & 1)) ||
5464 (src2_wide && (rm & 1)) ||
5465 (!src2_wide && (rd & 1))) {
ad69471c 5466 return 1;
695272dc 5467 }
ad69471c 5468
9ee6e8bb
PB
5469 /* Avoid overlapping operands. Wide source operands are
5470 always aligned so will never overlap with wide
5471 destinations in problematic ways. */
8f8e3aa4 5472 if (rd == rm && !src2_wide) {
dd8fbd78
FN
5473 tmp = neon_load_reg(rm, 1);
5474 neon_store_scratch(2, tmp);
8f8e3aa4 5475 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
5476 tmp = neon_load_reg(rn, 1);
5477 neon_store_scratch(2, tmp);
9ee6e8bb 5478 }
39d5492a 5479 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 5480 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5481 if (src1_wide) {
5482 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 5483 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5484 } else {
ad69471c 5485 if (pass == 1 && rd == rn) {
dd8fbd78 5486 tmp = neon_load_scratch(2);
9ee6e8bb 5487 } else {
ad69471c
PB
5488 tmp = neon_load_reg(rn, pass);
5489 }
5490 if (prewiden) {
5491 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
5492 }
5493 }
ad69471c
PB
5494 if (src2_wide) {
5495 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 5496 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5497 } else {
ad69471c 5498 if (pass == 1 && rd == rm) {
dd8fbd78 5499 tmp2 = neon_load_scratch(2);
9ee6e8bb 5500 } else {
ad69471c
PB
5501 tmp2 = neon_load_reg(rm, pass);
5502 }
5503 if (prewiden) {
5504 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 5505 }
9ee6e8bb
PB
5506 }
5507 switch (op) {
5508 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 5509 gen_neon_addl(size);
9ee6e8bb 5510 break;
79b0e534 5511 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 5512 gen_neon_subl(size);
9ee6e8bb
PB
5513 break;
5514 case 5: case 7: /* VABAL, VABDL */
5515 switch ((size << 1) | u) {
ad69471c
PB
5516 case 0:
5517 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5518 break;
5519 case 1:
5520 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5521 break;
5522 case 2:
5523 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5524 break;
5525 case 3:
5526 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5527 break;
5528 case 4:
5529 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5530 break;
5531 case 5:
5532 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5533 break;
9ee6e8bb
PB
5534 default: abort();
5535 }
7d1b0095
PM
5536 tcg_temp_free_i32(tmp2);
5537 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
5538 break;
5539 case 8: case 9: case 10: case 11: case 12: case 13:
5540 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 5541 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
5542 break;
5543 case 14: /* Polynomial VMULL */
e5ca24cb 5544 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
5545 tcg_temp_free_i32(tmp2);
5546 tcg_temp_free_i32(tmp);
e5ca24cb 5547 break;
695272dc
PM
5548 default: /* 15 is RESERVED: caught earlier */
5549 abort();
9ee6e8bb 5550 }
ebcd88ce
PM
5551 if (op == 13) {
5552 /* VQDMULL */
5553 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5554 neon_store_reg64(cpu_V0, rd + pass);
5555 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 5556 /* Accumulate. */
ebcd88ce 5557 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5558 switch (op) {
4dc064e6
PM
5559 case 10: /* VMLSL */
5560 gen_neon_negl(cpu_V0, size);
5561 /* Fall through */
5562 case 5: case 8: /* VABAL, VMLAL */
ad69471c 5563 gen_neon_addl(size);
9ee6e8bb
PB
5564 break;
5565 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 5566 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5567 if (op == 11) {
5568 gen_neon_negl(cpu_V0, size);
5569 }
ad69471c
PB
5570 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5571 break;
9ee6e8bb
PB
5572 default:
5573 abort();
5574 }
ad69471c 5575 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5576 } else if (op == 4 || op == 6) {
5577 /* Narrowing operation. */
7d1b0095 5578 tmp = tcg_temp_new_i32();
79b0e534 5579 if (!u) {
9ee6e8bb 5580 switch (size) {
ad69471c
PB
5581 case 0:
5582 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5583 break;
5584 case 1:
5585 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5586 break;
5587 case 2:
5588 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5589 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5590 break;
9ee6e8bb
PB
5591 default: abort();
5592 }
5593 } else {
5594 switch (size) {
ad69471c
PB
5595 case 0:
5596 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5597 break;
5598 case 1:
5599 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5600 break;
5601 case 2:
5602 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5603 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5604 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5605 break;
9ee6e8bb
PB
5606 default: abort();
5607 }
5608 }
ad69471c
PB
5609 if (pass == 0) {
5610 tmp3 = tmp;
5611 } else {
5612 neon_store_reg(rd, 0, tmp3);
5613 neon_store_reg(rd, 1, tmp);
5614 }
9ee6e8bb
PB
5615 } else {
5616 /* Write back the result. */
ad69471c 5617 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5618 }
5619 }
5620 } else {
3e3326df
PM
5621 /* Two registers and a scalar. NB that for ops of this form
5622 * the ARM ARM labels bit 24 as Q, but it is in our variable
5623 * 'u', not 'q'.
5624 */
5625 if (size == 0) {
5626 return 1;
5627 }
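            /* For illustration: a quad-width VMLA (by scalar) arrives here
             * with u == 1 rather than q == 1, which is why the pass loops
             * for these ops iterate (u ? 4 : 2) times.
             */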
9ee6e8bb 5628 switch (op) {
9ee6e8bb 5629 case 1: /* Float VMLA scalar */
9ee6e8bb 5630 case 5: /* Floating point VMLS scalar */
9ee6e8bb 5631 case 9: /* Floating point VMUL scalar */
3e3326df
PM
5632 if (size == 1) {
5633 return 1;
5634 }
5635 /* fall through */
5636 case 0: /* Integer VMLA scalar */
5637 case 4: /* Integer VMLS scalar */
5638 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
5639 case 12: /* VQDMULH scalar */
5640 case 13: /* VQRDMULH scalar */
3e3326df
PM
5641 if (u && ((rd | rn) & 1)) {
5642 return 1;
5643 }
dd8fbd78
FN
5644 tmp = neon_get_scalar(size, rm);
5645 neon_store_scratch(0, tmp);
9ee6e8bb 5646 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
5647 tmp = neon_load_scratch(0);
5648 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
5649 if (op == 12) {
5650 if (size == 1) {
02da0b2d 5651 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5652 } else {
02da0b2d 5653 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5654 }
5655 } else if (op == 13) {
5656 if (size == 1) {
02da0b2d 5657 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 5658 } else {
02da0b2d 5659 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
5660 }
5661 } else if (op & 1) {
aa47cfdd
PM
5662 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5663 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5664 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
5665 } else {
5666 switch (size) {
dd8fbd78
FN
5667 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5668 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5669 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 5670 default: abort();
9ee6e8bb
PB
5671 }
5672 }
7d1b0095 5673 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
5674 if (op < 8) {
5675 /* Accumulate. */
dd8fbd78 5676 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
5677 switch (op) {
5678 case 0:
dd8fbd78 5679 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5680 break;
5681 case 1:
aa47cfdd
PM
5682 {
5683 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5684 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5685 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5686 break;
aa47cfdd 5687 }
9ee6e8bb 5688 case 4:
dd8fbd78 5689 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
5690 break;
5691 case 5:
aa47cfdd
PM
5692 {
5693 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5694 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5695 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5696 break;
aa47cfdd 5697 }
9ee6e8bb
PB
5698 default:
5699 abort();
5700 }
7d1b0095 5701 tcg_temp_free_i32(tmp2);
9ee6e8bb 5702 }
dd8fbd78 5703 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5704 }
5705 break;
9ee6e8bb 5706 case 3: /* VQDMLAL scalar */
9ee6e8bb 5707 case 7: /* VQDMLSL scalar */
9ee6e8bb 5708 case 11: /* VQDMULL scalar */
3e3326df 5709 if (u == 1) {
ad69471c 5710 return 1;
3e3326df
PM
5711 }
5712 /* fall through */
 5713            case 2: /* VMLAL scalar */
5714 case 6: /* VMLSL scalar */
5715 case 10: /* VMULL scalar */
5716 if (rd & 1) {
5717 return 1;
5718 }
dd8fbd78 5719 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
5720 /* We need a copy of tmp2 because gen_neon_mull
5721 * deletes it during pass 0. */
7d1b0095 5722 tmp4 = tcg_temp_new_i32();
c6067f04 5723 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 5724 tmp3 = neon_load_reg(rn, 1);
ad69471c 5725
9ee6e8bb 5726 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5727 if (pass == 0) {
5728 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5729 } else {
dd8fbd78 5730 tmp = tmp3;
c6067f04 5731 tmp2 = tmp4;
9ee6e8bb 5732 }
ad69471c 5733 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
5734 if (op != 11) {
5735 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5736 }
9ee6e8bb 5737 switch (op) {
4dc064e6
PM
5738 case 6:
5739 gen_neon_negl(cpu_V0, size);
5740 /* Fall through */
5741 case 2:
ad69471c 5742 gen_neon_addl(size);
9ee6e8bb
PB
5743 break;
5744 case 3: case 7:
ad69471c 5745 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
5746 if (op == 7) {
5747 gen_neon_negl(cpu_V0, size);
5748 }
ad69471c 5749 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5750 break;
5751 case 10:
5752 /* no-op */
5753 break;
5754 case 11:
ad69471c 5755 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5756 break;
5757 default:
5758 abort();
5759 }
ad69471c 5760 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5761 }
dd8fbd78 5762
dd8fbd78 5763
9ee6e8bb
PB
5764 break;
5765 default: /* 14 and 15 are RESERVED */
5766 return 1;
5767 }
5768 }
5769 } else { /* size == 3 */
5770 if (!u) {
5771 /* Extract. */
9ee6e8bb 5772 imm = (insn >> 8) & 0xf;
ad69471c
PB
5773
5774 if (imm > 7 && !q)
5775 return 1;
5776
52579ea1
PM
5777 if (q && ((rd | rn | rm) & 1)) {
5778 return 1;
5779 }
5780
ad69471c
PB
5781 if (imm == 0) {
5782 neon_load_reg64(cpu_V0, rn);
5783 if (q) {
5784 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5785 }
ad69471c
PB
5786 } else if (imm == 8) {
5787 neon_load_reg64(cpu_V0, rn + 1);
5788 if (q) {
5789 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5790 }
ad69471c 5791 } else if (q) {
a7812ae4 5792 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5793 if (imm < 8) {
5794 neon_load_reg64(cpu_V0, rn);
a7812ae4 5795 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5796 } else {
5797 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5798 neon_load_reg64(tmp64, rm);
ad69471c
PB
5799 }
5800 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5801 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5802 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5803 if (imm < 8) {
5804 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5805 } else {
ad69471c
PB
5806 neon_load_reg64(cpu_V1, rm + 1);
5807 imm -= 8;
9ee6e8bb 5808 }
ad69471c 5809 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5810 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5811 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 5812 tcg_temp_free_i64(tmp64);
ad69471c 5813 } else {
a7812ae4 5814                /* BUGFIX: non-quad VEXT, bytes imm..7 of Dn followed by
                                bytes 0..imm-1 of Dm */
ad69471c 5815 neon_load_reg64(cpu_V0, rn);
a7812ae4 5816 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5817 neon_load_reg64(cpu_V1, rm);
a7812ae4 5818 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5819 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5820 }
5821 neon_store_reg64(cpu_V0, rd);
5822 if (q) {
5823 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5824 }
5825 } else if ((insn & (1 << 11)) == 0) {
5826 /* Two register misc. */
5827 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5828 size = (insn >> 18) & 3;
600b828c
PM
5829 /* UNDEF for unknown op values and bad op-size combinations */
5830 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5831 return 1;
5832 }
fc2a9b37
PM
5833 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5834 q && ((rm | rd) & 1)) {
5835 return 1;
5836 }
9ee6e8bb 5837 switch (op) {
600b828c 5838 case NEON_2RM_VREV64:
9ee6e8bb 5839 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
5840 tmp = neon_load_reg(rm, pass * 2);
5841 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 5842 switch (size) {
dd8fbd78
FN
5843 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5844 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
5845 case 2: /* no-op */ break;
5846 default: abort();
5847 }
dd8fbd78 5848 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 5849 if (size == 2) {
dd8fbd78 5850 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 5851 } else {
9ee6e8bb 5852 switch (size) {
dd8fbd78
FN
5853 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5854 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
5855 default: abort();
5856 }
dd8fbd78 5857 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
5858 }
5859 }
5860 break;
600b828c
PM
5861 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5862 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
5863 for (pass = 0; pass < q + 1; pass++) {
5864 tmp = neon_load_reg(rm, pass * 2);
5865 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5866 tmp = neon_load_reg(rm, pass * 2 + 1);
5867 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5868 switch (size) {
5869 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5870 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5871 case 2: tcg_gen_add_i64(CPU_V001); break;
5872 default: abort();
5873 }
600b828c 5874 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 5875 /* Accumulate. */
ad69471c
PB
5876 neon_load_reg64(cpu_V1, rd + pass);
5877 gen_neon_addl(size);
9ee6e8bb 5878 }
ad69471c 5879 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5880 }
5881 break;
600b828c 5882 case NEON_2RM_VTRN:
9ee6e8bb 5883 if (size == 2) {
a5a14945 5884 int n;
9ee6e8bb 5885 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
5886 tmp = neon_load_reg(rm, n);
5887 tmp2 = neon_load_reg(rd, n + 1);
5888 neon_store_reg(rm, n, tmp2);
5889 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
5890 }
5891 } else {
5892 goto elementwise;
5893 }
5894 break;
600b828c 5895 case NEON_2RM_VUZP:
02acedf9 5896 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 5897 return 1;
9ee6e8bb
PB
5898 }
5899 break;
600b828c 5900 case NEON_2RM_VZIP:
d68a6f3a 5901 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 5902 return 1;
9ee6e8bb
PB
5903 }
5904 break;
600b828c
PM
5905 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5906 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
5907 if (rm & 1) {
5908 return 1;
5909 }
39d5492a 5910 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 5911 for (pass = 0; pass < 2; pass++) {
ad69471c 5912 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 5913 tmp = tcg_temp_new_i32();
600b828c
PM
5914 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5915 tmp, cpu_V0);
ad69471c
PB
5916 if (pass == 0) {
5917 tmp2 = tmp;
5918 } else {
5919 neon_store_reg(rd, 0, tmp2);
5920 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5921 }
9ee6e8bb
PB
5922 }
5923 break;
600b828c 5924 case NEON_2RM_VSHLL:
fc2a9b37 5925 if (q || (rd & 1)) {
9ee6e8bb 5926 return 1;
600b828c 5927 }
ad69471c
PB
5928 tmp = neon_load_reg(rm, 0);
5929 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5930 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5931 if (pass == 1)
5932 tmp = tmp2;
5933 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 5934 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 5935 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5936 }
5937 break;
600b828c 5938 case NEON_2RM_VCVT_F16_F32:
fc2a9b37
PM
5939 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5940 q || (rm & 1)) {
5941 return 1;
5942 }
7d1b0095
PM
5943 tmp = tcg_temp_new_i32();
5944 tmp2 = tcg_temp_new_i32();
60011498 5945 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 5946 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 5947 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 5948 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5949 tcg_gen_shli_i32(tmp2, tmp2, 16);
5950 tcg_gen_or_i32(tmp2, tmp2, tmp);
5951 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 5952 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
5953 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5954 neon_store_reg(rd, 0, tmp2);
7d1b0095 5955 tmp2 = tcg_temp_new_i32();
2d981da7 5956 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
5957 tcg_gen_shli_i32(tmp2, tmp2, 16);
5958 tcg_gen_or_i32(tmp2, tmp2, tmp);
5959 neon_store_reg(rd, 1, tmp2);
7d1b0095 5960 tcg_temp_free_i32(tmp);
60011498 5961 break;
600b828c 5962 case NEON_2RM_VCVT_F32_F16:
fc2a9b37
PM
5963 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5964 q || (rd & 1)) {
5965 return 1;
5966 }
7d1b0095 5967 tmp3 = tcg_temp_new_i32();
60011498
PB
5968 tmp = neon_load_reg(rm, 0);
5969 tmp2 = neon_load_reg(rm, 1);
5970 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 5971 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5972 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5973 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 5974 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5975 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 5976 tcg_temp_free_i32(tmp);
60011498 5977 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 5978 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
5979 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5980 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 5981 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 5982 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
5983 tcg_temp_free_i32(tmp2);
5984 tcg_temp_free_i32(tmp3);
60011498 5985 break;
9ee6e8bb
PB
5986 default:
5987 elementwise:
5988 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 5989 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
5990 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5991 neon_reg_offset(rm, pass));
39d5492a 5992 TCGV_UNUSED_I32(tmp);
9ee6e8bb 5993 } else {
dd8fbd78 5994 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
5995 }
5996 switch (op) {
600b828c 5997 case NEON_2RM_VREV32:
9ee6e8bb 5998 switch (size) {
dd8fbd78
FN
5999 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6000 case 1: gen_swap_half(tmp); break;
600b828c 6001 default: abort();
9ee6e8bb
PB
6002 }
6003 break;
600b828c 6004 case NEON_2RM_VREV16:
dd8fbd78 6005 gen_rev16(tmp);
9ee6e8bb 6006 break;
600b828c 6007 case NEON_2RM_VCLS:
9ee6e8bb 6008 switch (size) {
dd8fbd78
FN
6009 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6010 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6011 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6012 default: abort();
9ee6e8bb
PB
6013 }
6014 break;
600b828c 6015 case NEON_2RM_VCLZ:
9ee6e8bb 6016 switch (size) {
dd8fbd78
FN
6017 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6018 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6019 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6020 default: abort();
9ee6e8bb
PB
6021 }
6022 break;
600b828c 6023 case NEON_2RM_VCNT:
dd8fbd78 6024 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6025 break;
600b828c 6026 case NEON_2RM_VMVN:
dd8fbd78 6027 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6028 break;
600b828c 6029 case NEON_2RM_VQABS:
9ee6e8bb 6030 switch (size) {
02da0b2d
PM
6031 case 0:
6032 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6033 break;
6034 case 1:
6035 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6036 break;
6037 case 2:
6038 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6039 break;
600b828c 6040 default: abort();
9ee6e8bb
PB
6041 }
6042 break;
600b828c 6043 case NEON_2RM_VQNEG:
9ee6e8bb 6044 switch (size) {
02da0b2d
PM
6045 case 0:
6046 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6047 break;
6048 case 1:
6049 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6050 break;
6051 case 2:
6052 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6053 break;
600b828c 6054 default: abort();
9ee6e8bb
PB
6055 }
6056 break;
600b828c 6057 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6058 tmp2 = tcg_const_i32(0);
9ee6e8bb 6059 switch(size) {
dd8fbd78
FN
6060 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6061 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6062 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6063 default: abort();
9ee6e8bb 6064 }
39d5492a 6065 tcg_temp_free_i32(tmp2);
600b828c 6066 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6067 tcg_gen_not_i32(tmp, tmp);
600b828c 6068 }
9ee6e8bb 6069 break;
600b828c 6070 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6071 tmp2 = tcg_const_i32(0);
9ee6e8bb 6072 switch(size) {
dd8fbd78
FN
6073 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6074 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6075 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6076 default: abort();
9ee6e8bb 6077 }
39d5492a 6078 tcg_temp_free_i32(tmp2);
600b828c 6079 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6080 tcg_gen_not_i32(tmp, tmp);
600b828c 6081 }
9ee6e8bb 6082 break;
600b828c 6083 case NEON_2RM_VCEQ0:
dd8fbd78 6084 tmp2 = tcg_const_i32(0);
9ee6e8bb 6085 switch(size) {
dd8fbd78
FN
6086 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6087 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6088 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6089 default: abort();
9ee6e8bb 6090 }
39d5492a 6091 tcg_temp_free_i32(tmp2);
9ee6e8bb 6092 break;
600b828c 6093 case NEON_2RM_VABS:
9ee6e8bb 6094 switch(size) {
dd8fbd78
FN
6095 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6096 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6097 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6098 default: abort();
9ee6e8bb
PB
6099 }
6100 break;
600b828c 6101 case NEON_2RM_VNEG:
dd8fbd78
FN
6102 tmp2 = tcg_const_i32(0);
6103 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6104 tcg_temp_free_i32(tmp2);
9ee6e8bb 6105 break;
600b828c 6106 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6107 {
6108 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6109 tmp2 = tcg_const_i32(0);
aa47cfdd 6110 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6111 tcg_temp_free_i32(tmp2);
aa47cfdd 6112 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6113 break;
aa47cfdd 6114 }
600b828c 6115 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6116 {
6117 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6118 tmp2 = tcg_const_i32(0);
aa47cfdd 6119 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6120 tcg_temp_free_i32(tmp2);
aa47cfdd 6121 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6122 break;
aa47cfdd 6123 }
600b828c 6124 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6125 {
6126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6127 tmp2 = tcg_const_i32(0);
aa47cfdd 6128 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6129 tcg_temp_free_i32(tmp2);
aa47cfdd 6130 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6131 break;
aa47cfdd 6132 }
600b828c 6133 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6134 {
6135 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6136 tmp2 = tcg_const_i32(0);
aa47cfdd 6137 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6138 tcg_temp_free_i32(tmp2);
aa47cfdd 6139 tcg_temp_free_ptr(fpstatus);
0e326109 6140 break;
aa47cfdd 6141 }
600b828c 6142 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6143 {
6144 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6145 tmp2 = tcg_const_i32(0);
aa47cfdd 6146 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6147 tcg_temp_free_i32(tmp2);
aa47cfdd 6148 tcg_temp_free_ptr(fpstatus);
0e326109 6149 break;
aa47cfdd 6150 }
600b828c 6151 case NEON_2RM_VABS_F:
4373f3ce 6152 gen_vfp_abs(0);
9ee6e8bb 6153 break;
600b828c 6154 case NEON_2RM_VNEG_F:
4373f3ce 6155 gen_vfp_neg(0);
9ee6e8bb 6156 break;
600b828c 6157 case NEON_2RM_VSWP:
dd8fbd78
FN
6158 tmp2 = neon_load_reg(rd, pass);
6159 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6160 break;
600b828c 6161 case NEON_2RM_VTRN:
dd8fbd78 6162 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6163 switch (size) {
dd8fbd78
FN
6164 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6165 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6166 default: abort();
9ee6e8bb 6167 }
dd8fbd78 6168 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6169 break;
600b828c 6170 case NEON_2RM_VRECPE:
dd8fbd78 6171 gen_helper_recpe_u32(tmp, tmp, cpu_env);
9ee6e8bb 6172 break;
600b828c 6173 case NEON_2RM_VRSQRTE:
dd8fbd78 6174 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
9ee6e8bb 6175 break;
600b828c 6176 case NEON_2RM_VRECPE_F:
4373f3ce 6177 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6178 break;
600b828c 6179 case NEON_2RM_VRSQRTE_F:
4373f3ce 6180 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb 6181 break;
600b828c 6182 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6183 gen_vfp_sito(0, 1);
9ee6e8bb 6184 break;
600b828c 6185 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6186 gen_vfp_uito(0, 1);
9ee6e8bb 6187 break;
600b828c 6188 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6189 gen_vfp_tosiz(0, 1);
9ee6e8bb 6190 break;
600b828c 6191 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6192 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6193 break;
6194 default:
600b828c
PM
6195 /* Reserved op values were caught by the
6196 * neon_2rm_sizes[] check earlier.
6197 */
6198 abort();
9ee6e8bb 6199 }
600b828c 6200 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6201 tcg_gen_st_f32(cpu_F0s, cpu_env,
6202 neon_reg_offset(rd, pass));
9ee6e8bb 6203 } else {
dd8fbd78 6204 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6205 }
6206 }
6207 break;
6208 }
6209 } else if ((insn & (1 << 10)) == 0) {
6210 /* VTBL, VTBX. */
56907d77
PM
6211 int n = ((insn >> 8) & 3) + 1;
6212 if ((rn + n) > 32) {
6213 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6214 * helper function running off the end of the register file.
6215 */
6216 return 1;
6217 }
6218 n <<= 3;
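            /* Example (illustrative): VTBL.8 d0, {d2-d4}, d5 encodes a
             * three-register list, so n is 3 above and 24 here -- the table
             * length in bytes passed to the helper below.
             */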
9ee6e8bb 6219 if (insn & (1 << 6)) {
8f8e3aa4 6220 tmp = neon_load_reg(rd, 0);
9ee6e8bb 6221 } else {
7d1b0095 6222 tmp = tcg_temp_new_i32();
8f8e3aa4 6223 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6224 }
8f8e3aa4 6225 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
6226 tmp4 = tcg_const_i32(rn);
6227 tmp5 = tcg_const_i32(n);
9ef39277 6228 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 6229 tcg_temp_free_i32(tmp);
9ee6e8bb 6230 if (insn & (1 << 6)) {
8f8e3aa4 6231 tmp = neon_load_reg(rd, 1);
9ee6e8bb 6232 } else {
7d1b0095 6233 tmp = tcg_temp_new_i32();
8f8e3aa4 6234 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6235 }
8f8e3aa4 6236 tmp3 = neon_load_reg(rm, 1);
9ef39277 6237 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
6238 tcg_temp_free_i32(tmp5);
6239 tcg_temp_free_i32(tmp4);
8f8e3aa4 6240 neon_store_reg(rd, 0, tmp2);
3018f259 6241 neon_store_reg(rd, 1, tmp3);
7d1b0095 6242 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6243 } else if ((insn & 0x380) == 0) {
6244 /* VDUP */
133da6aa
JR
6245 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6246 return 1;
6247 }
9ee6e8bb 6248 if (insn & (1 << 19)) {
dd8fbd78 6249 tmp = neon_load_reg(rm, 1);
9ee6e8bb 6250 } else {
dd8fbd78 6251 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
6252 }
6253 if (insn & (1 << 16)) {
dd8fbd78 6254 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
6255 } else if (insn & (1 << 17)) {
6256 if ((insn >> 18) & 1)
dd8fbd78 6257 gen_neon_dup_high16(tmp);
9ee6e8bb 6258 else
dd8fbd78 6259 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
6260 }
6261 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 6262 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
6263 tcg_gen_mov_i32(tmp2, tmp);
6264 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 6265 }
7d1b0095 6266 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6267 } else {
6268 return 1;
6269 }
6270 }
6271 }
6272 return 0;
6273}
6274
0ecb72a5 6275static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
9ee6e8bb 6276{
4b6a83fb
PM
6277 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6278 const ARMCPRegInfo *ri;
6279 ARMCPU *cpu = arm_env_get_cpu(env);
9ee6e8bb
PB
6280
6281 cpnum = (insn >> 8) & 0xf;
6282 if (arm_feature(env, ARM_FEATURE_XSCALE)
6283 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6284 return 1;
6285
4b6a83fb 6286 /* First check for coprocessor space used for actual instructions */
9ee6e8bb
PB
6287 switch (cpnum) {
6288 case 0:
6289 case 1:
6290 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6291 return disas_iwmmxt_insn(env, s, insn);
6292 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6293 return disas_dsp_insn(env, s, insn);
6294 }
6295 return 1;
6296 case 10:
6297 case 11:
6298 return disas_vfp_insn (env, s, insn);
4b6a83fb
PM
6299 default:
6300 break;
6301 }
6302
6303 /* Otherwise treat as a generic register access */
6304 is64 = (insn & (1 << 25)) == 0;
6305 if (!is64 && ((insn & (1 << 4)) == 0)) {
6306 /* cdp */
6307 return 1;
6308 }
6309
6310 crm = insn & 0xf;
6311 if (is64) {
6312 crn = 0;
6313 opc1 = (insn >> 4) & 0xf;
6314 opc2 = 0;
6315 rt2 = (insn >> 16) & 0xf;
6316 } else {
6317 crn = (insn >> 16) & 0xf;
6318 opc1 = (insn >> 21) & 7;
6319 opc2 = (insn >> 5) & 7;
6320 rt2 = 0;
6321 }
6322 isread = (insn >> 20) & 1;
6323 rt = (insn >> 12) & 0xf;
6324
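    /* Worked example (illustrative): MRC p15, 0, r0, c1, c0, 0 (a SCTLR
     * read) decodes here as cpnum == 15, is64 == 0, opc1 == 0, crn == 1,
     * crm == 0, opc2 == 0, isread == 1 and rt == 0, and is then looked up
     * via ENCODE_CP_REG() below.
     */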
6325 ri = get_arm_cp_reginfo(cpu,
6326 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6327 if (ri) {
6328 /* Check access permissions */
6329 if (!cp_access_ok(env, ri, isread)) {
6330 return 1;
6331 }
6332
6333 /* Handle special cases first */
6334 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6335 case ARM_CP_NOP:
6336 return 0;
6337 case ARM_CP_WFI:
6338 if (isread) {
6339 return 1;
6340 }
eaed129d 6341 gen_set_pc_im(s, s->pc);
4b6a83fb 6342 s->is_jmp = DISAS_WFI;
2bee5105 6343 return 0;
4b6a83fb
PM
6344 default:
6345 break;
6346 }
6347
2452731c
PM
6348 if (use_icount && (ri->type & ARM_CP_IO)) {
6349 gen_io_start();
6350 }
6351
4b6a83fb
PM
6352 if (isread) {
6353 /* Read */
6354 if (is64) {
6355 TCGv_i64 tmp64;
6356 TCGv_i32 tmp;
6357 if (ri->type & ARM_CP_CONST) {
6358 tmp64 = tcg_const_i64(ri->resetvalue);
6359 } else if (ri->readfn) {
6360 TCGv_ptr tmpptr;
eaed129d 6361 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6362 tmp64 = tcg_temp_new_i64();
6363 tmpptr = tcg_const_ptr(ri);
6364 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6365 tcg_temp_free_ptr(tmpptr);
6366 } else {
6367 tmp64 = tcg_temp_new_i64();
6368 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6369 }
6370 tmp = tcg_temp_new_i32();
6371 tcg_gen_trunc_i64_i32(tmp, tmp64);
6372 store_reg(s, rt, tmp);
6373 tcg_gen_shri_i64(tmp64, tmp64, 32);
ed336850 6374 tmp = tcg_temp_new_i32();
4b6a83fb 6375 tcg_gen_trunc_i64_i32(tmp, tmp64);
ed336850 6376 tcg_temp_free_i64(tmp64);
4b6a83fb
PM
6377 store_reg(s, rt2, tmp);
6378 } else {
39d5492a 6379 TCGv_i32 tmp;
4b6a83fb
PM
6380 if (ri->type & ARM_CP_CONST) {
6381 tmp = tcg_const_i32(ri->resetvalue);
6382 } else if (ri->readfn) {
6383 TCGv_ptr tmpptr;
eaed129d 6384 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6385 tmp = tcg_temp_new_i32();
6386 tmpptr = tcg_const_ptr(ri);
6387 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6388 tcg_temp_free_ptr(tmpptr);
6389 } else {
6390 tmp = load_cpu_offset(ri->fieldoffset);
6391 }
6392 if (rt == 15) {
6393 /* Destination register of r15 for 32 bit loads sets
6394 * the condition codes from the high 4 bits of the value
6395 */
6396 gen_set_nzcv(tmp);
6397 tcg_temp_free_i32(tmp);
6398 } else {
6399 store_reg(s, rt, tmp);
6400 }
6401 }
6402 } else {
6403 /* Write */
6404 if (ri->type & ARM_CP_CONST) {
6405 /* If not forbidden by access permissions, treat as WI */
6406 return 0;
6407 }
6408
6409 if (is64) {
39d5492a 6410 TCGv_i32 tmplo, tmphi;
4b6a83fb
PM
6411 TCGv_i64 tmp64 = tcg_temp_new_i64();
6412 tmplo = load_reg(s, rt);
6413 tmphi = load_reg(s, rt2);
6414 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6415 tcg_temp_free_i32(tmplo);
6416 tcg_temp_free_i32(tmphi);
6417 if (ri->writefn) {
6418 TCGv_ptr tmpptr = tcg_const_ptr(ri);
eaed129d 6419 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6420 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6421 tcg_temp_free_ptr(tmpptr);
6422 } else {
6423 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6424 }
6425 tcg_temp_free_i64(tmp64);
6426 } else {
6427 if (ri->writefn) {
39d5492a 6428 TCGv_i32 tmp;
4b6a83fb 6429 TCGv_ptr tmpptr;
eaed129d 6430 gen_set_pc_im(s, s->pc);
4b6a83fb
PM
6431 tmp = load_reg(s, rt);
6432 tmpptr = tcg_const_ptr(ri);
6433 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6434 tcg_temp_free_ptr(tmpptr);
6435 tcg_temp_free_i32(tmp);
6436 } else {
39d5492a 6437 TCGv_i32 tmp = load_reg(s, rt);
4b6a83fb
PM
6438 store_cpu_offset(tmp, ri->fieldoffset);
6439 }
6440 }
2452731c
PM
6441 }
6442
6443 if (use_icount && (ri->type & ARM_CP_IO)) {
6444 /* I/O operations must end the TB here (whether read or write) */
6445 gen_io_end();
6446 gen_lookup_tb(s);
6447 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
4b6a83fb
PM
6448 /* We default to ending the TB on a coprocessor register write,
6449 * but allow this to be suppressed by the register definition
6450 * (usually only necessary to work around guest bugs).
6451 */
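            /* An illustrative case: a write that changes MMU or other
             * translation-affecting state must not let the rest of the
             * current TB run with stale assumptions, hence the lookup below.
             */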
2452731c 6452 gen_lookup_tb(s);
4b6a83fb 6453 }
2452731c 6454
4b6a83fb
PM
6455 return 0;
6456 }
6457
4a9a539f 6458 return 1;
9ee6e8bb
PB
6459}
6460
5e3f878a
PB
6461
6462/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 6463static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 6464{
39d5492a 6465 TCGv_i32 tmp;
7d1b0095 6466 tmp = tcg_temp_new_i32();
5e3f878a
PB
6467 tcg_gen_trunc_i64_i32(tmp, val);
6468 store_reg(s, rlow, tmp);
7d1b0095 6469 tmp = tcg_temp_new_i32();
5e3f878a
PB
6470 tcg_gen_shri_i64(val, val, 32);
6471 tcg_gen_trunc_i64_i32(tmp, val);
6472 store_reg(s, rhigh, tmp);
6473}
6474
6475/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 6476static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 6477{
a7812ae4 6478 TCGv_i64 tmp;
39d5492a 6479 TCGv_i32 tmp2;
5e3f878a 6480
36aa55dc 6481 /* Load value and extend to 64 bits. */
a7812ae4 6482 tmp = tcg_temp_new_i64();
5e3f878a
PB
6483 tmp2 = load_reg(s, rlow);
6484 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 6485 tcg_temp_free_i32(tmp2);
5e3f878a 6486 tcg_gen_add_i64(val, val, tmp);
b75263d6 6487 tcg_temp_free_i64(tmp);
5e3f878a
PB
6488}
6489
6490/* load and add a 64-bit value from a register pair. */
a7812ae4 6491static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 6492{
a7812ae4 6493 TCGv_i64 tmp;
39d5492a
PM
6494 TCGv_i32 tmpl;
6495 TCGv_i32 tmph;
5e3f878a
PB
6496
6497 /* Load 64-bit value rd:rn. */
36aa55dc
PB
6498 tmpl = load_reg(s, rlow);
6499 tmph = load_reg(s, rhigh);
a7812ae4 6500 tmp = tcg_temp_new_i64();
36aa55dc 6501 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
6502 tcg_temp_free_i32(tmpl);
6503 tcg_temp_free_i32(tmph);
5e3f878a 6504 tcg_gen_add_i64(val, val, tmp);
b75263d6 6505 tcg_temp_free_i64(tmp);
5e3f878a
PB
6506}
6507
c9f10124 6508/* Set N and Z flags from hi|lo. */
39d5492a 6509static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
5e3f878a 6510{
c9f10124
RH
6511 tcg_gen_mov_i32(cpu_NF, hi);
6512 tcg_gen_or_i32(cpu_ZF, lo, hi);
5e3f878a
PB
6513}
6514
426f5abc
PB
6515/* Load/Store exclusive instructions are implemented by remembering
6516 the value/address loaded, and seeing if these are the same
b90372ad 6517 when the store is performed. This should be sufficient to implement
426f5abc
PB
6518 the architecturally mandated semantics, and avoids having to monitor
6519 regular stores.
6520
6521 In system emulation mode only one CPU will be running at once, so
6522 this sequence is effectively atomic. In user emulation mode we
6523 throw an exception and handle the atomic operation elsewhere. */
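
/* Illustrative guest-level sketch of the semantics being modelled:
 *
 *     ldrex  r1, [r0]       @ record r0 in exclusive_addr, [r0] in exclusive_val
 *     ...
 *     strex  r2, r3, [r0]   @ store r3 and set r2 = 0 only if exclusive_addr
 *                           @ still matches r0 and [r0] still equals
 *                           @ exclusive_val; otherwise set r2 = 1, no store
 */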
6524static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
39d5492a 6525 TCGv_i32 addr, int size)
426f5abc 6526{
94ee24e7 6527 TCGv_i32 tmp = tcg_temp_new_i32();
426f5abc
PB
6528
6529 switch (size) {
6530 case 0:
08307563 6531 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6532 break;
6533 case 1:
08307563 6534 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6535 break;
6536 case 2:
6537 case 3:
08307563 6538 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6539 break;
6540 default:
6541 abort();
6542 }
6543 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6544 store_reg(s, rt, tmp);
6545 if (size == 3) {
39d5492a 6546 TCGv_i32 tmp2 = tcg_temp_new_i32();
2c9adbda 6547 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7 6548 tmp = tcg_temp_new_i32();
08307563 6549 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6550 tcg_temp_free_i32(tmp2);
426f5abc
PB
6551 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6552 store_reg(s, rt2, tmp);
6553 }
6554 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6555}
6556
6557static void gen_clrex(DisasContext *s)
6558{
6559 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6560}
6561
6562#ifdef CONFIG_USER_ONLY
6563static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6564 TCGv_i32 addr, int size)
426f5abc
PB
6565{
6566 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6567 tcg_gen_movi_i32(cpu_exclusive_info,
6568 size | (rd << 4) | (rt << 8) | (rt2 << 12));
bc4a0de0 6569 gen_exception_insn(s, 4, EXCP_STREX);
426f5abc
PB
6570}
6571#else
6572static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
39d5492a 6573 TCGv_i32 addr, int size)
426f5abc 6574{
39d5492a 6575 TCGv_i32 tmp;
426f5abc
PB
6576 int done_label;
6577 int fail_label;
6578
6579 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6580 [addr] = {Rt};
6581 {Rd} = 0;
6582 } else {
6583 {Rd} = 1;
6584 } */
6585 fail_label = gen_new_label();
6586 done_label = gen_new_label();
6587 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
94ee24e7 6588 tmp = tcg_temp_new_i32();
426f5abc
PB
6589 switch (size) {
6590 case 0:
08307563 6591 gen_aa32_ld8u(tmp, addr, IS_USER(s));
426f5abc
PB
6592 break;
6593 case 1:
08307563 6594 gen_aa32_ld16u(tmp, addr, IS_USER(s));
426f5abc
PB
6595 break;
6596 case 2:
6597 case 3:
08307563 6598 gen_aa32_ld32u(tmp, addr, IS_USER(s));
426f5abc
PB
6599 break;
6600 default:
6601 abort();
6602 }
6603 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
7d1b0095 6604 tcg_temp_free_i32(tmp);
426f5abc 6605 if (size == 3) {
39d5492a 6606 TCGv_i32 tmp2 = tcg_temp_new_i32();
426f5abc 6607 tcg_gen_addi_i32(tmp2, addr, 4);
94ee24e7 6608 tmp = tcg_temp_new_i32();
08307563 6609 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
7d1b0095 6610 tcg_temp_free_i32(tmp2);
426f5abc 6611 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
7d1b0095 6612 tcg_temp_free_i32(tmp);
426f5abc
PB
6613 }
6614 tmp = load_reg(s, rt);
6615 switch (size) {
6616 case 0:
08307563 6617 gen_aa32_st8(tmp, addr, IS_USER(s));
426f5abc
PB
6618 break;
6619 case 1:
08307563 6620 gen_aa32_st16(tmp, addr, IS_USER(s));
426f5abc
PB
6621 break;
6622 case 2:
6623 case 3:
08307563 6624 gen_aa32_st32(tmp, addr, IS_USER(s));
426f5abc
PB
6625 break;
6626 default:
6627 abort();
6628 }
94ee24e7 6629 tcg_temp_free_i32(tmp);
426f5abc
PB
6630 if (size == 3) {
6631 tcg_gen_addi_i32(addr, addr, 4);
6632 tmp = load_reg(s, rt2);
08307563 6633 gen_aa32_st32(tmp, addr, IS_USER(s));
94ee24e7 6634 tcg_temp_free_i32(tmp);
426f5abc
PB
6635 }
6636 tcg_gen_movi_i32(cpu_R[rd], 0);
6637 tcg_gen_br(done_label);
6638 gen_set_label(fail_label);
6639 tcg_gen_movi_i32(cpu_R[rd], 1);
6640 gen_set_label(done_label);
6641 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6642}
6643#endif
6644
81465888
PM
6645/* gen_srs:
6646 * @env: CPUARMState
6647 * @s: DisasContext
6648 * @mode: mode field from insn (which stack to store to)
6649 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6650 * @writeback: true if writeback bit set
6651 *
6652 * Generate code for the SRS (Store Return State) insn.
6653 */
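/* Worked example (illustrative): SRSDB sp!, #0x13 (amode == 2, writeback set)
 * stores the current LR at [SP_svc - 8] and SPSR at [SP_svc - 4], then writes
 * SP_svc - 8 back to the Supervisor-mode SP, matching the offsets chosen in
 * the two switch statements below.
 */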
6654static void gen_srs(DisasContext *s,
6655 uint32_t mode, uint32_t amode, bool writeback)
6656{
6657 int32_t offset;
6658 TCGv_i32 addr = tcg_temp_new_i32();
6659 TCGv_i32 tmp = tcg_const_i32(mode);
6660 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6661 tcg_temp_free_i32(tmp);
6662 switch (amode) {
6663 case 0: /* DA */
6664 offset = -4;
6665 break;
6666 case 1: /* IA */
6667 offset = 0;
6668 break;
6669 case 2: /* DB */
6670 offset = -8;
6671 break;
6672 case 3: /* IB */
6673 offset = 4;
6674 break;
6675 default:
6676 abort();
6677 }
6678 tcg_gen_addi_i32(addr, addr, offset);
6679 tmp = load_reg(s, 14);
08307563 6680 gen_aa32_st32(tmp, addr, 0);
5a839c0d 6681 tcg_temp_free_i32(tmp);
81465888
PM
6682 tmp = load_cpu_field(spsr);
6683 tcg_gen_addi_i32(addr, addr, 4);
08307563 6684 gen_aa32_st32(tmp, addr, 0);
5a839c0d 6685 tcg_temp_free_i32(tmp);
81465888
PM
6686 if (writeback) {
6687 switch (amode) {
6688 case 0:
6689 offset = -8;
6690 break;
6691 case 1:
6692 offset = 4;
6693 break;
6694 case 2:
6695 offset = -4;
6696 break;
6697 case 3:
6698 offset = 0;
6699 break;
6700 default:
6701 abort();
6702 }
6703 tcg_gen_addi_i32(addr, addr, offset);
6704 tmp = tcg_const_i32(mode);
6705 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6706 tcg_temp_free_i32(tmp);
6707 }
6708 tcg_temp_free_i32(addr);
6709}
6710
0ecb72a5 6711static void disas_arm_insn(CPUARMState * env, DisasContext *s)
9ee6e8bb
PB
6712{
6713 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
6714 TCGv_i32 tmp;
6715 TCGv_i32 tmp2;
6716 TCGv_i32 tmp3;
6717 TCGv_i32 addr;
a7812ae4 6718 TCGv_i64 tmp64;
9ee6e8bb 6719
d31dd73e 6720 insn = arm_ldl_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
6721 s->pc += 4;
6722
6723 /* M variants do not implement ARM mode. */
6724 if (IS_M(env))
6725 goto illegal_op;
6726 cond = insn >> 28;
6727 if (cond == 0xf){
be5e7a76
DES
6728 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6729 * choose to UNDEF. In ARMv5 and above the space is used
6730 * for miscellaneous unconditional instructions.
6731 */
6732 ARCH(5);
6733
9ee6e8bb
PB
6734 /* Unconditional instructions. */
6735 if (((insn >> 25) & 7) == 1) {
6736 /* NEON Data processing. */
6737 if (!arm_feature(env, ARM_FEATURE_NEON))
6738 goto illegal_op;
6739
6740 if (disas_neon_data_insn(env, s, insn))
6741 goto illegal_op;
6742 return;
6743 }
6744 if ((insn & 0x0f100000) == 0x04000000) {
6745 /* NEON load/store. */
6746 if (!arm_feature(env, ARM_FEATURE_NEON))
6747 goto illegal_op;
6748
6749 if (disas_neon_ls_insn(env, s, insn))
6750 goto illegal_op;
6751 return;
6752 }
3d185e5d
PM
6753 if (((insn & 0x0f30f000) == 0x0510f000) ||
6754 ((insn & 0x0f30f010) == 0x0710f000)) {
6755 if ((insn & (1 << 22)) == 0) {
6756 /* PLDW; v7MP */
6757 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6758 goto illegal_op;
6759 }
6760 }
6761 /* Otherwise PLD; v5TE+ */
be5e7a76 6762 ARCH(5TE);
3d185e5d
PM
6763 return;
6764 }
6765 if (((insn & 0x0f70f000) == 0x0450f000) ||
6766 ((insn & 0x0f70f010) == 0x0650f000)) {
6767 ARCH(7);
6768 return; /* PLI; V7 */
6769 }
6770 if (((insn & 0x0f700000) == 0x04100000) ||
6771 ((insn & 0x0f700010) == 0x06100000)) {
6772 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6773 goto illegal_op;
6774 }
6775 return; /* v7MP: Unallocated memory hint: must NOP */
6776 }
6777
6778 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
6779 ARCH(6);
6780 /* setend */
10962fd5
PM
6781 if (((insn >> 9) & 1) != s->bswap_code) {
6782 /* Dynamic endianness switching not implemented. */
e0c270d9 6783 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
6784 goto illegal_op;
6785 }
6786 return;
6787 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6788 switch ((insn >> 4) & 0xf) {
6789 case 1: /* clrex */
6790 ARCH(6K);
426f5abc 6791 gen_clrex(s);
9ee6e8bb
PB
6792 return;
6793 case 4: /* dsb */
6794 case 5: /* dmb */
6795 case 6: /* isb */
6796 ARCH(7);
6797 /* We don't emulate caches so these are a no-op. */
6798 return;
6799 default:
6800 goto illegal_op;
6801 }
6802 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6803 /* srs */
81465888 6804 if (IS_USER(s)) {
9ee6e8bb 6805 goto illegal_op;
9ee6e8bb 6806 }
81465888
PM
6807 ARCH(6);
6808 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 6809 return;
ea825eee 6810 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 6811 /* rfe */
c67b6b71 6812 int32_t offset;
9ee6e8bb
PB
6813 if (IS_USER(s))
6814 goto illegal_op;
6815 ARCH(6);
6816 rn = (insn >> 16) & 0xf;
b0109805 6817 addr = load_reg(s, rn);
9ee6e8bb
PB
6818 i = (insn >> 23) & 3;
6819 switch (i) {
b0109805 6820 case 0: offset = -4; break; /* DA */
c67b6b71
FN
6821 case 1: offset = 0; break; /* IA */
6822 case 2: offset = -8; break; /* DB */
b0109805 6823 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
6824 default: abort();
6825 }
6826 if (offset)
b0109805
PB
6827 tcg_gen_addi_i32(addr, addr, offset);
6828 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 6829 tmp = tcg_temp_new_i32();
08307563 6830 gen_aa32_ld32u(tmp, addr, 0);
b0109805 6831 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 6832 tmp2 = tcg_temp_new_i32();
08307563 6833 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
6834 if (insn & (1 << 21)) {
6835 /* Base writeback. */
6836 switch (i) {
b0109805 6837 case 0: offset = -8; break;
c67b6b71
FN
6838 case 1: offset = 4; break;
6839 case 2: offset = -4; break;
b0109805 6840 case 3: offset = 0; break;
9ee6e8bb
PB
6841 default: abort();
6842 }
6843 if (offset)
b0109805
PB
6844 tcg_gen_addi_i32(addr, addr, offset);
6845 store_reg(s, rn, addr);
6846 } else {
7d1b0095 6847 tcg_temp_free_i32(addr);
9ee6e8bb 6848 }
b0109805 6849 gen_rfe(s, tmp, tmp2);
c67b6b71 6850 return;
9ee6e8bb
PB
6851 } else if ((insn & 0x0e000000) == 0x0a000000) {
6852 /* branch link and change to thumb (blx <offset>) */
6853 int32_t offset;
6854
6855 val = (uint32_t)s->pc;
7d1b0095 6856 tmp = tcg_temp_new_i32();
d9ba4830
PB
6857 tcg_gen_movi_i32(tmp, val);
6858 store_reg(s, 14, tmp);
9ee6e8bb
PB
6859 /* Sign-extend the 24-bit offset */
6860 offset = (((int32_t)insn) << 8) >> 8;
6861 /* offset * 4 + bit24 * 2 + (thumb bit) */
6862 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6863 /* pipeline offset */
6864 val += 4;
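            /* Illustrative arithmetic: a BLX at 0x8000 with imm24 == 1 and
             * H == 0 therefore targets 0x8000 + 8 + 4 = 0x800c, with bit 0
             * set so gen_bx_im() switches to Thumb state; LR already holds
             * 0x8004 from the store above.
             */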
be5e7a76 6865            /* protected by the ARCH(5) check above, near the start of the uncond block */
d9ba4830 6866 gen_bx_im(s, val);
9ee6e8bb
PB
6867 return;
6868 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6869 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6870 /* iWMMXt register transfer. */
6871 if (env->cp15.c15_cpar & (1 << 1))
6872 if (!disas_iwmmxt_insn(env, s, insn))
6873 return;
6874 }
6875 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6876 /* Coprocessor double register transfer. */
be5e7a76 6877 ARCH(5TE);
9ee6e8bb
PB
6878 } else if ((insn & 0x0f000010) == 0x0e000010) {
6879 /* Additional coprocessor register transfer. */
7997d92f 6880 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
6881 uint32_t mask;
6882 uint32_t val;
6883 /* cps (privileged) */
6884 if (IS_USER(s))
6885 return;
6886 mask = val = 0;
6887 if (insn & (1 << 19)) {
6888 if (insn & (1 << 8))
6889 mask |= CPSR_A;
6890 if (insn & (1 << 7))
6891 mask |= CPSR_I;
6892 if (insn & (1 << 6))
6893 mask |= CPSR_F;
6894 if (insn & (1 << 18))
6895 val |= mask;
6896 }
7997d92f 6897 if (insn & (1 << 17)) {
9ee6e8bb
PB
6898 mask |= CPSR_M;
6899 val |= (insn & 0x1f);
6900 }
6901 if (mask) {
2fbac54b 6902 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
6903 }
6904 return;
6905 }
6906 goto illegal_op;
6907 }
6908 if (cond != 0xe) {
6909 /* if not always execute, we generate a conditional jump to
6910 next instruction */
6911 s->condlabel = gen_new_label();
d9ba4830 6912 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
6913 s->condjmp = 1;
6914 }
6915 if ((insn & 0x0f900000) == 0x03000000) {
6916 if ((insn & (1 << 21)) == 0) {
6917 ARCH(6T2);
6918 rd = (insn >> 12) & 0xf;
6919 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6920 if ((insn & (1 << 22)) == 0) {
6921 /* MOVW */
7d1b0095 6922 tmp = tcg_temp_new_i32();
5e3f878a 6923 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
6924 } else {
6925 /* MOVT */
5e3f878a 6926 tmp = load_reg(s, rd);
86831435 6927 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 6928 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 6929 }
5e3f878a 6930 store_reg(s, rd, tmp);
9ee6e8bb
PB
6931 } else {
6932 if (((insn >> 12) & 0xf) != 0xf)
6933 goto illegal_op;
6934 if (((insn >> 16) & 0xf) == 0) {
6935 gen_nop_hint(s, insn & 0xff);
6936 } else {
6937 /* CPSR = immediate */
6938 val = insn & 0xff;
6939 shift = ((insn >> 8) & 0xf) * 2;
6940 if (shift)
6941 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 6942 i = ((insn & (1 << 22)) != 0);
2fbac54b 6943 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
9ee6e8bb
PB
6944 goto illegal_op;
6945 }
6946 }
6947 } else if ((insn & 0x0f900000) == 0x01000000
6948 && (insn & 0x00000090) != 0x00000090) {
6949 /* miscellaneous instructions */
6950 op1 = (insn >> 21) & 3;
6951 sh = (insn >> 4) & 0xf;
6952 rm = insn & 0xf;
6953 switch (sh) {
6954 case 0x0: /* move program status register */
6955 if (op1 & 1) {
6956 /* PSR = reg */
2fbac54b 6957 tmp = load_reg(s, rm);
9ee6e8bb 6958 i = ((op1 & 2) != 0);
2fbac54b 6959 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
6960 goto illegal_op;
6961 } else {
6962 /* reg = PSR */
6963 rd = (insn >> 12) & 0xf;
6964 if (op1 & 2) {
6965 if (IS_USER(s))
6966 goto illegal_op;
d9ba4830 6967 tmp = load_cpu_field(spsr);
9ee6e8bb 6968 } else {
7d1b0095 6969 tmp = tcg_temp_new_i32();
9ef39277 6970 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 6971 }
d9ba4830 6972 store_reg(s, rd, tmp);
9ee6e8bb
PB
6973 }
6974 break;
6975 case 0x1:
6976 if (op1 == 1) {
6977 /* branch/exchange thumb (bx). */
be5e7a76 6978 ARCH(4T);
d9ba4830
PB
6979 tmp = load_reg(s, rm);
6980 gen_bx(s, tmp);
9ee6e8bb
PB
6981 } else if (op1 == 3) {
6982 /* clz */
be5e7a76 6983 ARCH(5);
9ee6e8bb 6984 rd = (insn >> 12) & 0xf;
1497c961
PB
6985 tmp = load_reg(s, rm);
6986 gen_helper_clz(tmp, tmp);
6987 store_reg(s, rd, tmp);
9ee6e8bb
PB
6988 } else {
6989 goto illegal_op;
6990 }
6991 break;
6992 case 0x2:
6993 if (op1 == 1) {
6994 ARCH(5J); /* bxj */
6995 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6996 tmp = load_reg(s, rm);
6997 gen_bx(s, tmp);
9ee6e8bb
PB
6998 } else {
6999 goto illegal_op;
7000 }
7001 break;
7002 case 0x3:
7003 if (op1 != 1)
7004 goto illegal_op;
7005
be5e7a76 7006 ARCH(5);
9ee6e8bb 7007 /* branch link/exchange thumb (blx) */
d9ba4830 7008 tmp = load_reg(s, rm);
7d1b0095 7009 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7010 tcg_gen_movi_i32(tmp2, s->pc);
7011 store_reg(s, 14, tmp2);
7012 gen_bx(s, tmp);
9ee6e8bb
PB
7013 break;
7014 case 0x5: /* saturating add/subtract */
be5e7a76 7015 ARCH(5TE);
9ee6e8bb
PB
7016 rd = (insn >> 12) & 0xf;
7017 rn = (insn >> 16) & 0xf;
b40d0353 7018 tmp = load_reg(s, rm);
5e3f878a 7019 tmp2 = load_reg(s, rn);
9ee6e8bb 7020 if (op1 & 2)
9ef39277 7021 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7022 if (op1 & 1)
9ef39277 7023 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7024 else
9ef39277 7025 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7026 tcg_temp_free_i32(tmp2);
5e3f878a 7027 store_reg(s, rd, tmp);
9ee6e8bb 7028 break;
49e14940
AL
7029 case 7:
7030 /* SMC instruction (op1 == 3)
7031 and undefined instructions (op1 == 0 || op1 == 2)
7032 will trap */
7033 if (op1 != 1) {
7034 goto illegal_op;
7035 }
7036 /* bkpt */
be5e7a76 7037 ARCH(5);
bc4a0de0 7038 gen_exception_insn(s, 4, EXCP_BKPT);
9ee6e8bb
PB
7039 break;
7040 case 0x8: /* signed multiply */
7041 case 0xa:
7042 case 0xc:
7043 case 0xe:
be5e7a76 7044 ARCH(5TE);
9ee6e8bb
PB
7045 rs = (insn >> 8) & 0xf;
7046 rn = (insn >> 12) & 0xf;
7047 rd = (insn >> 16) & 0xf;
7048 if (op1 == 1) {
7049 /* (32 * 16) >> 16 */
5e3f878a
PB
7050 tmp = load_reg(s, rm);
7051 tmp2 = load_reg(s, rs);
9ee6e8bb 7052 if (sh & 4)
5e3f878a 7053 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7054 else
5e3f878a 7055 gen_sxth(tmp2);
a7812ae4
PB
7056 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7057 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 7058 tmp = tcg_temp_new_i32();
a7812ae4 7059 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 7060 tcg_temp_free_i64(tmp64);
9ee6e8bb 7061 if ((sh & 2) == 0) {
5e3f878a 7062 tmp2 = load_reg(s, rn);
9ef39277 7063 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7064 tcg_temp_free_i32(tmp2);
9ee6e8bb 7065 }
5e3f878a 7066 store_reg(s, rd, tmp);
9ee6e8bb
PB
7067 } else {
7068 /* 16 * 16 */
5e3f878a
PB
7069 tmp = load_reg(s, rm);
7070 tmp2 = load_reg(s, rs);
7071 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 7072 tcg_temp_free_i32(tmp2);
9ee6e8bb 7073 if (op1 == 2) {
a7812ae4
PB
7074 tmp64 = tcg_temp_new_i64();
7075 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7076 tcg_temp_free_i32(tmp);
a7812ae4
PB
7077 gen_addq(s, tmp64, rn, rd);
7078 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 7079 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
7080 } else {
7081 if (op1 == 0) {
5e3f878a 7082 tmp2 = load_reg(s, rn);
9ef39277 7083 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7084 tcg_temp_free_i32(tmp2);
9ee6e8bb 7085 }
5e3f878a 7086 store_reg(s, rd, tmp);
9ee6e8bb
PB
7087 }
7088 }
7089 break;
7090 default:
7091 goto illegal_op;
7092 }
7093 } else if (((insn & 0x0e000000) == 0 &&
7094 (insn & 0x00000090) != 0x90) ||
7095 ((insn & 0x0e000000) == (1 << 25))) {
7096 int set_cc, logic_cc, shiftop;
7097
7098 op1 = (insn >> 21) & 0xf;
7099 set_cc = (insn >> 20) & 1;
7100 logic_cc = table_logic_cc[op1] & set_cc;
7101
7102 /* data processing instruction */
7103 if (insn & (1 << 25)) {
7104 /* immediate operand */
7105 val = insn & 0xff;
7106 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 7107 if (shift) {
9ee6e8bb 7108 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 7109 }
7d1b0095 7110 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
7111 tcg_gen_movi_i32(tmp2, val);
7112 if (logic_cc && shift) {
7113 gen_set_CF_bit31(tmp2);
7114 }
9ee6e8bb
PB
7115 } else {
7116 /* register */
7117 rm = (insn) & 0xf;
e9bb4aa9 7118 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7119 shiftop = (insn >> 5) & 3;
7120 if (!(insn & (1 << 4))) {
7121 shift = (insn >> 7) & 0x1f;
e9bb4aa9 7122 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
7123 } else {
7124 rs = (insn >> 8) & 0xf;
8984bd2e 7125 tmp = load_reg(s, rs);
e9bb4aa9 7126 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
7127 }
7128 }
7129 if (op1 != 0x0f && op1 != 0x0d) {
7130 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
7131 tmp = load_reg(s, rn);
7132 } else {
39d5492a 7133 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
7134 }
7135 rd = (insn >> 12) & 0xf;
7136 switch(op1) {
7137 case 0x00:
e9bb4aa9
JR
7138 tcg_gen_and_i32(tmp, tmp, tmp2);
7139 if (logic_cc) {
7140 gen_logic_CC(tmp);
7141 }
21aeb343 7142 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7143 break;
7144 case 0x01:
e9bb4aa9
JR
7145 tcg_gen_xor_i32(tmp, tmp, tmp2);
7146 if (logic_cc) {
7147 gen_logic_CC(tmp);
7148 }
21aeb343 7149 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7150 break;
7151 case 0x02:
7152 if (set_cc && rd == 15) {
7153 /* SUBS r15, ... is used for exception return. */
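                    /* Illustrative use: an IRQ handler typically returns with
                     * SUBS pc, lr, #4, which takes this path and restores
                     * CPSR from SPSR via gen_exception_return().
                     */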
e9bb4aa9 7154 if (IS_USER(s)) {
9ee6e8bb 7155 goto illegal_op;
e9bb4aa9 7156 }
72485ec4 7157 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 7158 gen_exception_return(s, tmp);
9ee6e8bb 7159 } else {
e9bb4aa9 7160 if (set_cc) {
72485ec4 7161 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7162 } else {
7163 tcg_gen_sub_i32(tmp, tmp, tmp2);
7164 }
21aeb343 7165 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7166 }
7167 break;
7168 case 0x03:
e9bb4aa9 7169 if (set_cc) {
72485ec4 7170 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7171 } else {
7172 tcg_gen_sub_i32(tmp, tmp2, tmp);
7173 }
21aeb343 7174 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7175 break;
7176 case 0x04:
e9bb4aa9 7177 if (set_cc) {
72485ec4 7178 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7179 } else {
7180 tcg_gen_add_i32(tmp, tmp, tmp2);
7181 }
21aeb343 7182 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7183 break;
7184 case 0x05:
e9bb4aa9 7185 if (set_cc) {
49b4c31e 7186 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7187 } else {
7188 gen_add_carry(tmp, tmp, tmp2);
7189 }
21aeb343 7190 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7191 break;
7192 case 0x06:
e9bb4aa9 7193 if (set_cc) {
2de68a49 7194 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
7195 } else {
7196 gen_sub_carry(tmp, tmp, tmp2);
7197 }
21aeb343 7198 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7199 break;
7200 case 0x07:
e9bb4aa9 7201 if (set_cc) {
2de68a49 7202 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
7203 } else {
7204 gen_sub_carry(tmp, tmp2, tmp);
7205 }
21aeb343 7206 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7207 break;
7208 case 0x08:
7209 if (set_cc) {
e9bb4aa9
JR
7210 tcg_gen_and_i32(tmp, tmp, tmp2);
7211 gen_logic_CC(tmp);
9ee6e8bb 7212 }
7d1b0095 7213 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7214 break;
7215 case 0x09:
7216 if (set_cc) {
e9bb4aa9
JR
7217 tcg_gen_xor_i32(tmp, tmp, tmp2);
7218 gen_logic_CC(tmp);
9ee6e8bb 7219 }
7d1b0095 7220 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7221 break;
7222 case 0x0a:
7223 if (set_cc) {
72485ec4 7224 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 7225 }
7d1b0095 7226 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7227 break;
7228 case 0x0b:
7229 if (set_cc) {
72485ec4 7230 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 7231 }
7d1b0095 7232 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7233 break;
7234 case 0x0c:
e9bb4aa9
JR
7235 tcg_gen_or_i32(tmp, tmp, tmp2);
7236 if (logic_cc) {
7237 gen_logic_CC(tmp);
7238 }
21aeb343 7239 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7240 break;
7241 case 0x0d:
7242 if (logic_cc && rd == 15) {
7243 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 7244 if (IS_USER(s)) {
9ee6e8bb 7245 goto illegal_op;
e9bb4aa9
JR
7246 }
7247 gen_exception_return(s, tmp2);
9ee6e8bb 7248 } else {
e9bb4aa9
JR
7249 if (logic_cc) {
7250 gen_logic_CC(tmp2);
7251 }
21aeb343 7252 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7253 }
7254 break;
7255 case 0x0e:
f669df27 7256 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
7257 if (logic_cc) {
7258 gen_logic_CC(tmp);
7259 }
21aeb343 7260 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7261 break;
7262 default:
7263 case 0x0f:
e9bb4aa9
JR
7264 tcg_gen_not_i32(tmp2, tmp2);
7265 if (logic_cc) {
7266 gen_logic_CC(tmp2);
7267 }
21aeb343 7268 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
7269 break;
7270 }
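/* For MOV (0x0d) and MVN (0x0f) the result is tmp2 itself and has already
   been handed to store_reg_bx()/gen_exception_return() above, so tmp2 is
   only freed for the remaining opcodes. */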
e9bb4aa9 7271 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 7272 tcg_temp_free_i32(tmp2);
e9bb4aa9 7273 }
9ee6e8bb
PB
7274 } else {
7275 /* other instructions */
7276 op1 = (insn >> 24) & 0xf;
7277 switch(op1) {
7278 case 0x0:
7279 case 0x1:
7280 /* multiplies, extra load/stores */
7281 sh = (insn >> 5) & 3;
7282 if (sh == 0) {
7283 if (op1 == 0x0) {
7284 rd = (insn >> 16) & 0xf;
7285 rn = (insn >> 12) & 0xf;
7286 rs = (insn >> 8) & 0xf;
7287 rm = (insn) & 0xf;
7288 op1 = (insn >> 20) & 0xf;
7289 switch (op1) {
7290 case 0: case 1: case 2: case 3: case 6:
7291 /* 32 bit mul */
5e3f878a
PB
7292 tmp = load_reg(s, rs);
7293 tmp2 = load_reg(s, rm);
7294 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 7295 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7296 if (insn & (1 << 22)) {
7297 /* Subtract (mls) */
7298 ARCH(6T2);
5e3f878a
PB
7299 tmp2 = load_reg(s, rn);
7300 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 7301 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7302 } else if (insn & (1 << 21)) {
7303 /* Add */
5e3f878a
PB
7304 tmp2 = load_reg(s, rn);
7305 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7306 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7307 }
7308 if (insn & (1 << 20))
5e3f878a
PB
7309 gen_logic_CC(tmp);
7310 store_reg(s, rd, tmp);
9ee6e8bb 7311 break;
8aac08b1
AJ
7312 case 4:
7313 /* 64 bit mul double accumulate (UMAAL) */
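/* UMAAL adds both halves of the destination pair (rn, rd) to the unsigned
   64-bit product of rs and rm. The sum cannot overflow 64 bits:
   0xffffffff * 0xffffffff + 2 * 0xffffffff == 0xffffffffffffffff. */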
7314 ARCH(6);
7315 tmp = load_reg(s, rs);
7316 tmp2 = load_reg(s, rm);
7317 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7318 gen_addq_lo(s, tmp64, rn);
7319 gen_addq_lo(s, tmp64, rd);
7320 gen_storeq_reg(s, rn, rd, tmp64);
7321 tcg_temp_free_i64(tmp64);
7322 break;
7323 case 8: case 9: case 10: case 11:
7324 case 12: case 13: case 14: case 15:
7325 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
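/* tcg_gen_mul[us]2_i32() below leaves the 64-bit product split as
   (low, high) in (tmp, tmp2); for the accumulating forms the existing
   destination pair is added in with tcg_gen_add2_i32(), which propagates
   the carry between the two halves. */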
5e3f878a
PB
7326 tmp = load_reg(s, rs);
7327 tmp2 = load_reg(s, rm);
8aac08b1 7328 if (insn & (1 << 22)) {
c9f10124 7329 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 7330 } else {
c9f10124 7331 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
7332 }
7333 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
7334 TCGv_i32 al = load_reg(s, rn);
7335 TCGv_i32 ah = load_reg(s, rd);
c9f10124 7336 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
7337 tcg_temp_free_i32(al);
7338 tcg_temp_free_i32(ah);
9ee6e8bb 7339 }
8aac08b1 7340 if (insn & (1 << 20)) {
c9f10124 7341 gen_logicq_cc(tmp, tmp2);
8aac08b1 7342 }
c9f10124
RH
7343 store_reg(s, rn, tmp);
7344 store_reg(s, rd, tmp2);
9ee6e8bb 7345 break;
8aac08b1
AJ
7346 default:
7347 goto illegal_op;
9ee6e8bb
PB
7348 }
7349 } else {
7350 rn = (insn >> 16) & 0xf;
7351 rd = (insn >> 12) & 0xf;
7352 if (insn & (1 << 23)) {
7353 /* load/store exclusive */
2359bf80 7354 int op2 = (insn >> 8) & 3;
86753403 7355 op1 = (insn >> 21) & 0x3;
2359bf80
MR
7356
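/* Decode sketch for this space (from the checks below):
 *   op2 (insn[9:8]): 0 = LDA/STL      (ARMv8)
 *                    1 = reserved
 *                    2 = LDAEX/STLEX  (ARMv8)
 *                    3 = LDREX/STREX  (ARMv6, or v6K for the
 *                                      byte/half/doubleword forms)
 *   op1 (insn[22:21]) selects the size: 0 = word, 1 = doubleword,
 *   2 = byte, 3 = halfword; op1 == 1 has no LDA/STL form. */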
7357 switch (op2) {
7358 case 0: /* lda/stl */
7359 if (op1 == 1) {
7360 goto illegal_op;
7361 }
7362 ARCH(8);
7363 break;
7364 case 1: /* reserved */
7365 goto illegal_op;
7366 case 2: /* ldaex/stlex */
7367 ARCH(8);
7368 break;
7369 case 3: /* ldrex/strex */
7370 if (op1) {
7371 ARCH(6K);
7372 } else {
7373 ARCH(6);
7374 }
7375 break;
7376 }
7377
3174f8e9 7378 addr = tcg_temp_local_new_i32();
98a46317 7379 load_reg_var(s, addr, rn);
2359bf80
MR
7380
7381 /* Since the emulation does not have barriers,
7382 the acquire/release semantics need no special
7383 handling. */
7384 if (op2 == 0) {
7385 if (insn & (1 << 20)) {
7386 tmp = tcg_temp_new_i32();
7387 switch (op1) {
7388 case 0: /* lda */
08307563 7389 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
7390 break;
7391 case 2: /* ldab */
08307563 7392 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
7393 break;
7394 case 3: /* ldah */
08307563 7395 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
7396 break;
7397 default:
7398 abort();
7399 }
7400 store_reg(s, rd, tmp);
7401 } else {
7402 rm = insn & 0xf;
7403 tmp = load_reg(s, rm);
7404 switch (op1) {
7405 case 0: /* stl */
08307563 7406 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
7407 break;
7408 case 2: /* stlb */
08307563 7409 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
7410 break;
7411 case 3: /* stlh */
08307563 7412 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
7413 break;
7414 default:
7415 abort();
7416 }
7417 tcg_temp_free_i32(tmp);
7418 }
7419 } else if (insn & (1 << 20)) {
86753403
PB
7420 switch (op1) {
7421 case 0: /* ldrex */
426f5abc 7422 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
7423 break;
7424 case 1: /* ldrexd */
426f5abc 7425 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
7426 break;
7427 case 2: /* ldrexb */
426f5abc 7428 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
7429 break;
7430 case 3: /* ldrexh */
426f5abc 7431 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
7432 break;
7433 default:
7434 abort();
7435 }
9ee6e8bb
PB
7436 } else {
7437 rm = insn & 0xf;
86753403
PB
7438 switch (op1) {
7439 case 0: /* strex */
426f5abc 7440 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
7441 break;
7442 case 1: /* strexd */
502e64fe 7443 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
7444 break;
7445 case 2: /* strexb */
426f5abc 7446 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
7447 break;
7448 case 3: /* strexh */
426f5abc 7449 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
7450 break;
7451 default:
7452 abort();
7453 }
9ee6e8bb 7454 }
39d5492a 7455 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7456 } else {
7457 /* SWP instruction */
7458 rm = (insn) & 0xf;
7459
8984bd2e
PB
7460 /* ??? This is not really atomic. However we know
7461 we never have multiple CPUs running in parallel,
7462 so it is good enough. */
7463 addr = load_reg(s, rn);
7464 tmp = load_reg(s, rm);
5a839c0d 7465 tmp2 = tcg_temp_new_i32();
9ee6e8bb 7466 if (insn & (1 << 22)) {
08307563
PM
7467 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
7468 gen_aa32_st8(tmp, addr, IS_USER(s));
9ee6e8bb 7469 } else {
08307563
PM
7470 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
7471 gen_aa32_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7472 }
5a839c0d 7473 tcg_temp_free_i32(tmp);
7d1b0095 7474 tcg_temp_free_i32(addr);
8984bd2e 7475 store_reg(s, rd, tmp2);
9ee6e8bb
PB
7476 }
7477 }
7478 } else {
7479 int address_offset;
7480 int load;
7481 /* Misc load/store */
7482 rn = (insn >> 16) & 0xf;
7483 rd = (insn >> 12) & 0xf;
b0109805 7484 addr = load_reg(s, rn);
9ee6e8bb 7485 if (insn & (1 << 24))
b0109805 7486 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
7487 address_offset = 0;
7488 if (insn & (1 << 20)) {
7489 /* load */
5a839c0d 7490 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
7491 switch(sh) {
7492 case 1:
08307563 7493 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7494 break;
7495 case 2:
08307563 7496 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7497 break;
7498 default:
7499 case 3:
08307563 7500 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7501 break;
7502 }
7503 load = 1;
7504 } else if (sh & 2) {
be5e7a76 7505 ARCH(5TE);
9ee6e8bb
PB
7506 /* doubleword */
7507 if (sh & 1) {
7508 /* store */
b0109805 7509 tmp = load_reg(s, rd);
08307563 7510 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7511 tcg_temp_free_i32(tmp);
b0109805
PB
7512 tcg_gen_addi_i32(addr, addr, 4);
7513 tmp = load_reg(s, rd + 1);
08307563 7514 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7515 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7516 load = 0;
7517 } else {
7518 /* load */
5a839c0d 7519 tmp = tcg_temp_new_i32();
08307563 7520 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
7521 store_reg(s, rd, tmp);
7522 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7523 tmp = tcg_temp_new_i32();
08307563 7524 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7525 rd++;
7526 load = 1;
7527 }
7528 address_offset = -4;
7529 } else {
7530 /* store */
b0109805 7531 tmp = load_reg(s, rd);
08307563 7532 gen_aa32_st16(tmp, addr, IS_USER(s));
5a839c0d 7533 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7534 load = 0;
7535 }
7536 /* Perform base writeback before the loaded value to
7537 ensure correct behavior with overlapping index registers.
7538 ldrd with base writeback is undefined if the
7539 destination and index registers overlap. */
7540 if (!(insn & (1 << 24))) {
b0109805
PB
7541 gen_add_datah_offset(s, insn, address_offset, addr);
7542 store_reg(s, rn, addr);
9ee6e8bb
PB
7543 } else if (insn & (1 << 21)) {
7544 if (address_offset)
b0109805
PB
7545 tcg_gen_addi_i32(addr, addr, address_offset);
7546 store_reg(s, rn, addr);
7547 } else {
7d1b0095 7548 tcg_temp_free_i32(addr);
9ee6e8bb
PB
7549 }
7550 if (load) {
7551 /* Complete the load. */
b0109805 7552 store_reg(s, rd, tmp);
9ee6e8bb
PB
7553 }
7554 }
7555 break;
7556 case 0x4:
7557 case 0x5:
7558 goto do_ldst;
7559 case 0x6:
7560 case 0x7:
7561 if (insn & (1 << 4)) {
7562 ARCH(6);
7563 /* Armv6 Media instructions. */
7564 rm = insn & 0xf;
7565 rn = (insn >> 16) & 0xf;
2c0262af 7566 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
7567 rs = (insn >> 8) & 0xf;
7568 switch ((insn >> 23) & 3) {
7569 case 0: /* Parallel add/subtract. */
7570 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
7571 tmp = load_reg(s, rn);
7572 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7573 sh = (insn >> 5) & 7;
7574 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7575 goto illegal_op;
6ddbc6e4 7576 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 7577 tcg_temp_free_i32(tmp2);
6ddbc6e4 7578 store_reg(s, rd, tmp);
9ee6e8bb
PB
7579 break;
7580 case 1:
7581 if ((insn & 0x00700020) == 0) {
6c95676b 7582 /* Halfword pack. */
3670669c
PB
7583 tmp = load_reg(s, rn);
7584 tmp2 = load_reg(s, rm);
9ee6e8bb 7585 shift = (insn >> 7) & 0x1f;
3670669c
PB
7586 if (insn & (1 << 6)) {
7587 /* pkhtb */
22478e79
AZ
7588 if (shift == 0)
7589 shift = 31;
7590 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 7591 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 7592 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
7593 } else {
7594 /* pkhbt */
22478e79
AZ
7595 if (shift)
7596 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 7597 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
7598 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7599 }
7600 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 7601 tcg_temp_free_i32(tmp2);
3670669c 7602 store_reg(s, rd, tmp);
9ee6e8bb
PB
7603 } else if ((insn & 0x00200020) == 0x00200000) {
7604 /* [us]sat */
6ddbc6e4 7605 tmp = load_reg(s, rm);
9ee6e8bb
PB
7606 shift = (insn >> 7) & 0x1f;
7607 if (insn & (1 << 6)) {
7608 if (shift == 0)
7609 shift = 31;
6ddbc6e4 7610 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7611 } else {
6ddbc6e4 7612 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
7613 }
7614 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7615 tmp2 = tcg_const_i32(sh);
7616 if (insn & (1 << 22))
9ef39277 7617 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 7618 else
9ef39277 7619 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 7620 tcg_temp_free_i32(tmp2);
6ddbc6e4 7621 store_reg(s, rd, tmp);
9ee6e8bb
PB
7622 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7623 /* [us]sat16 */
6ddbc6e4 7624 tmp = load_reg(s, rm);
9ee6e8bb 7625 sh = (insn >> 16) & 0x1f;
40d3c433
CL
7626 tmp2 = tcg_const_i32(sh);
7627 if (insn & (1 << 22))
9ef39277 7628 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7629 else
9ef39277 7630 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 7631 tcg_temp_free_i32(tmp2);
6ddbc6e4 7632 store_reg(s, rd, tmp);
9ee6e8bb
PB
7633 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7634 /* Select bytes. */
6ddbc6e4
PB
7635 tmp = load_reg(s, rn);
7636 tmp2 = load_reg(s, rm);
7d1b0095 7637 tmp3 = tcg_temp_new_i32();
0ecb72a5 7638 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 7639 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
7640 tcg_temp_free_i32(tmp3);
7641 tcg_temp_free_i32(tmp2);
6ddbc6e4 7642 store_reg(s, rd, tmp);
9ee6e8bb 7643 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 7644 tmp = load_reg(s, rm);
9ee6e8bb 7645 shift = (insn >> 10) & 3;
1301f322 7646 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
7647 rotate; a shift is sufficient. */
7648 if (shift != 0)
f669df27 7649 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7650 op1 = (insn >> 20) & 7;
7651 switch (op1) {
5e3f878a
PB
7652 case 0: gen_sxtb16(tmp); break;
7653 case 2: gen_sxtb(tmp); break;
7654 case 3: gen_sxth(tmp); break;
7655 case 4: gen_uxtb16(tmp); break;
7656 case 6: gen_uxtb(tmp); break;
7657 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
7658 default: goto illegal_op;
7659 }
7660 if (rn != 15) {
5e3f878a 7661 tmp2 = load_reg(s, rn);
9ee6e8bb 7662 if ((op1 & 3) == 0) {
5e3f878a 7663 gen_add16(tmp, tmp2);
9ee6e8bb 7664 } else {
5e3f878a 7665 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7666 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7667 }
7668 }
6c95676b 7669 store_reg(s, rd, tmp);
9ee6e8bb
PB
7670 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7671 /* rev */
b0109805 7672 tmp = load_reg(s, rm);
9ee6e8bb
PB
7673 if (insn & (1 << 22)) {
7674 if (insn & (1 << 7)) {
b0109805 7675 gen_revsh(tmp);
9ee6e8bb
PB
7676 } else {
7677 ARCH(6T2);
b0109805 7678 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7679 }
7680 } else {
7681 if (insn & (1 << 7))
b0109805 7682 gen_rev16(tmp);
9ee6e8bb 7683 else
66896cb8 7684 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 7685 }
b0109805 7686 store_reg(s, rd, tmp);
9ee6e8bb
PB
7687 } else {
7688 goto illegal_op;
7689 }
7690 break;
7691 case 2: /* Multiplies (Type 3). */
41e9564d
PM
7692 switch ((insn >> 20) & 0x7) {
7693 case 5:
7694 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7695 /* op2 not 00x or 11x : UNDEF */
7696 goto illegal_op;
7697 }
838fa72d
AJ
7698 /* Signed multiply most significant [accumulate].
7699 (SMMUL, SMMLA, SMMLS) */
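/* Bit 6 selects subtract (SMMLS) vs add (SMMLA) when rd != 15, and bit 5
   selects the rounding variants (SMMULR/SMMLAR/SMMLSR): adding 0x80000000
   before taking the high word rounds the result instead of truncating. */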
41e9564d
PM
7700 tmp = load_reg(s, rm);
7701 tmp2 = load_reg(s, rs);
a7812ae4 7702 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 7703
955a7dd5 7704 if (rd != 15) {
838fa72d 7705 tmp = load_reg(s, rd);
9ee6e8bb 7706 if (insn & (1 << 6)) {
838fa72d 7707 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 7708 } else {
838fa72d 7709 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
7710 }
7711 }
838fa72d
AJ
7712 if (insn & (1 << 5)) {
7713 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7714 }
7715 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 7716 tmp = tcg_temp_new_i32();
838fa72d
AJ
7717 tcg_gen_trunc_i64_i32(tmp, tmp64);
7718 tcg_temp_free_i64(tmp64);
955a7dd5 7719 store_reg(s, rn, tmp);
41e9564d
PM
7720 break;
7721 case 0:
7722 case 4:
7723 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7724 if (insn & (1 << 7)) {
7725 goto illegal_op;
7726 }
7727 tmp = load_reg(s, rm);
7728 tmp2 = load_reg(s, rs);
9ee6e8bb 7729 if (insn & (1 << 5))
5e3f878a
PB
7730 gen_swap_half(tmp2);
7731 gen_smul_dual(tmp, tmp2);
5e3f878a 7732 if (insn & (1 << 6)) {
e1d177b9 7733 /* This subtraction cannot overflow. */
5e3f878a
PB
7734 tcg_gen_sub_i32(tmp, tmp, tmp2);
7735 } else {
e1d177b9
PM
7736 /* This addition cannot overflow 32 bits;
7737 * however it may overflow considered as a signed
7738 * operation, in which case we must set the Q flag.
7739 */
9ef39277 7740 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
5e3f878a 7741 }
7d1b0095 7742 tcg_temp_free_i32(tmp2);
9ee6e8bb 7743 if (insn & (1 << 22)) {
5e3f878a 7744 /* smlald, smlsld */
a7812ae4
PB
7745 tmp64 = tcg_temp_new_i64();
7746 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 7747 tcg_temp_free_i32(tmp);
a7812ae4
PB
7748 gen_addq(s, tmp64, rd, rn);
7749 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 7750 tcg_temp_free_i64(tmp64);
9ee6e8bb 7751 } else {
5e3f878a 7752 /* smuad, smusd, smlad, smlsd */
22478e79 7753 if (rd != 15)
9ee6e8bb 7754 {
22478e79 7755 tmp2 = load_reg(s, rd);
9ef39277 7756 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 7757 tcg_temp_free_i32(tmp2);
9ee6e8bb 7758 }
22478e79 7759 store_reg(s, rn, tmp);
9ee6e8bb 7760 }
41e9564d 7761 break;
b8b8ea05
PM
7762 case 1:
7763 case 3:
7764 /* SDIV, UDIV */
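/* In the A32 encoding of SDIV/UDIV, bits [15:12] must be 0b1111 and bits
   [7:5] must be zero, hence the checks below; note the quotient is written
   to the register decoded here as rn (insn[19:16]). */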
7765 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7766 goto illegal_op;
7767 }
7768 if (((insn >> 5) & 7) || (rd != 15)) {
7769 goto illegal_op;
7770 }
7771 tmp = load_reg(s, rm);
7772 tmp2 = load_reg(s, rs);
7773 if (insn & (1 << 21)) {
7774 gen_helper_udiv(tmp, tmp, tmp2);
7775 } else {
7776 gen_helper_sdiv(tmp, tmp, tmp2);
7777 }
7778 tcg_temp_free_i32(tmp2);
7779 store_reg(s, rn, tmp);
7780 break;
41e9564d
PM
7781 default:
7782 goto illegal_op;
9ee6e8bb
PB
7783 }
7784 break;
7785 case 3:
7786 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7787 switch (op1) {
7788 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
7789 ARCH(6);
7790 tmp = load_reg(s, rm);
7791 tmp2 = load_reg(s, rs);
7792 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 7793 tcg_temp_free_i32(tmp2);
ded9d295
AZ
7794 if (rd != 15) {
7795 tmp2 = load_reg(s, rd);
6ddbc6e4 7796 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 7797 tcg_temp_free_i32(tmp2);
9ee6e8bb 7798 }
ded9d295 7799 store_reg(s, rn, tmp);
9ee6e8bb
PB
7800 break;
7801 case 0x20: case 0x24: case 0x28: case 0x2c:
7802 /* Bitfield insert/clear. */
7803 ARCH(6T2);
7804 shift = (insn >> 7) & 0x1f;
7805 i = (insn >> 16) & 0x1f;
7806 i = i + 1 - shift;
7807 if (rm == 15) {
7d1b0095 7808 tmp = tcg_temp_new_i32();
5e3f878a 7809 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7810 } else {
5e3f878a 7811 tmp = load_reg(s, rm);
9ee6e8bb
PB
7812 }
7813 if (i != 32) {
5e3f878a 7814 tmp2 = load_reg(s, rd);
d593c48e 7815 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 7816 tcg_temp_free_i32(tmp2);
9ee6e8bb 7817 }
5e3f878a 7818 store_reg(s, rd, tmp);
9ee6e8bb
PB
7819 break;
7820 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7821 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 7822 ARCH(6T2);
5e3f878a 7823 tmp = load_reg(s, rm);
9ee6e8bb
PB
7824 shift = (insn >> 7) & 0x1f;
7825 i = ((insn >> 16) & 0x1f) + 1;
7826 if (shift + i > 32)
7827 goto illegal_op;
7828 if (i < 32) {
7829 if (op1 & 0x20) {
5e3f878a 7830 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 7831 } else {
5e3f878a 7832 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
7833 }
7834 }
5e3f878a 7835 store_reg(s, rd, tmp);
9ee6e8bb
PB
7836 break;
7837 default:
7838 goto illegal_op;
7839 }
7840 break;
7841 }
7842 break;
7843 }
7844 do_ldst:
7845 /* Check for undefined extension instructions
7846 * per the ARM Bible, i.e.:
7847 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7848 */
7849 sh = (0xf << 20) | (0xf << 4);
7850 if (op1 == 0x7 && ((insn & sh) == sh))
7851 {
7852 goto illegal_op;
7853 }
7854 /* load/store byte/word */
7855 rn = (insn >> 16) & 0xf;
7856 rd = (insn >> 12) & 0xf;
b0109805 7857 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
7858 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7859 if (insn & (1 << 24))
b0109805 7860 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
7861 if (insn & (1 << 20)) {
7862 /* load */
5a839c0d 7863 tmp = tcg_temp_new_i32();
9ee6e8bb 7864 if (insn & (1 << 22)) {
08307563 7865 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 7866 } else {
08307563 7867 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 7868 }
9ee6e8bb
PB
7869 } else {
7870 /* store */
b0109805 7871 tmp = load_reg(s, rd);
5a839c0d 7872 if (insn & (1 << 22)) {
08307563 7873 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 7874 } else {
08307563 7875 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
7876 }
7877 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7878 }
7879 if (!(insn & (1 << 24))) {
b0109805
PB
7880 gen_add_data_offset(s, insn, tmp2);
7881 store_reg(s, rn, tmp2);
7882 } else if (insn & (1 << 21)) {
7883 store_reg(s, rn, tmp2);
7884 } else {
7d1b0095 7885 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
7886 }
7887 if (insn & (1 << 20)) {
7888 /* Complete the load. */
be5e7a76 7889 store_reg_from_load(env, s, rd, tmp);
9ee6e8bb
PB
7890 }
7891 break;
7892 case 0x08:
7893 case 0x09:
7894 {
7895 int j, n, user, loaded_base;
39d5492a 7896 TCGv_i32 loaded_var;
9ee6e8bb
PB
7897 /* load/store multiple words */
7898 /* XXX: store correct base if write back */
7899 user = 0;
7900 if (insn & (1 << 22)) {
7901 if (IS_USER(s))
7902 goto illegal_op; /* only usable in supervisor mode */
7903
7904 if ((insn & (1 << 15)) == 0)
7905 user = 1;
7906 }
7907 rn = (insn >> 16) & 0xf;
b0109805 7908 addr = load_reg(s, rn);
9ee6e8bb
PB
7909
7910 /* compute total size */
7911 loaded_base = 0;
39d5492a 7912 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
7913 n = 0;
7914 for(i=0;i<16;i++) {
7915 if (insn & (1 << i))
7916 n++;
7917 }
7918 /* XXX: test invalid n == 0 case ? */
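/* The adjustments below arrange for the transfer loop to walk upwards
   through memory regardless of the addressing mode; e.g. a decrement-before
   (DB) transfer of n == 3 registers from base r0 accesses r0-12, r0-8
   and r0-4. */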
7919 if (insn & (1 << 23)) {
7920 if (insn & (1 << 24)) {
7921 /* pre increment */
b0109805 7922 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7923 } else {
7924 /* post increment */
7925 }
7926 } else {
7927 if (insn & (1 << 24)) {
7928 /* pre decrement */
b0109805 7929 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7930 } else {
7931 /* post decrement */
7932 if (n != 1)
b0109805 7933 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7934 }
7935 }
7936 j = 0;
7937 for(i=0;i<16;i++) {
7938 if (insn & (1 << i)) {
7939 if (insn & (1 << 20)) {
7940 /* load */
5a839c0d 7941 tmp = tcg_temp_new_i32();
08307563 7942 gen_aa32_ld32u(tmp, addr, IS_USER(s));
be5e7a76 7943 if (user) {
b75263d6 7944 tmp2 = tcg_const_i32(i);
1ce94f81 7945 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 7946 tcg_temp_free_i32(tmp2);
7d1b0095 7947 tcg_temp_free_i32(tmp);
9ee6e8bb 7948 } else if (i == rn) {
b0109805 7949 loaded_var = tmp;
9ee6e8bb
PB
7950 loaded_base = 1;
7951 } else {
be5e7a76 7952 store_reg_from_load(env, s, i, tmp);
9ee6e8bb
PB
7953 }
7954 } else {
7955 /* store */
7956 if (i == 15) {
7957 /* special case: r15 = PC + 8 */
7958 val = (long)s->pc + 4;
7d1b0095 7959 tmp = tcg_temp_new_i32();
b0109805 7960 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 7961 } else if (user) {
7d1b0095 7962 tmp = tcg_temp_new_i32();
b75263d6 7963 tmp2 = tcg_const_i32(i);
9ef39277 7964 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 7965 tcg_temp_free_i32(tmp2);
9ee6e8bb 7966 } else {
b0109805 7967 tmp = load_reg(s, i);
9ee6e8bb 7968 }
08307563 7969 gen_aa32_st32(tmp, addr, IS_USER(s));
5a839c0d 7970 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7971 }
7972 j++;
7973 /* no need to add after the last transfer */
7974 if (j != n)
b0109805 7975 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7976 }
7977 }
7978 if (insn & (1 << 21)) {
7979 /* write back */
7980 if (insn & (1 << 23)) {
7981 if (insn & (1 << 24)) {
7982 /* pre increment */
7983 } else {
7984 /* post increment */
b0109805 7985 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7986 }
7987 } else {
7988 if (insn & (1 << 24)) {
7989 /* pre decrement */
7990 if (n != 1)
b0109805 7991 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
7992 } else {
7993 /* post decrement */
b0109805 7994 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
7995 }
7996 }
b0109805
PB
7997 store_reg(s, rn, addr);
7998 } else {
7d1b0095 7999 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8000 }
8001 if (loaded_base) {
b0109805 8002 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
8003 }
8004 if ((insn & (1 << 22)) && !user) {
8005 /* Restore CPSR from SPSR. */
d9ba4830
PB
8006 tmp = load_cpu_field(spsr);
8007 gen_set_cpsr(tmp, 0xffffffff);
7d1b0095 8008 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8009 s->is_jmp = DISAS_UPDATE;
8010 }
8011 }
8012 break;
8013 case 0xa:
8014 case 0xb:
8015 {
8016 int32_t offset;
8017
8018 /* branch (and link) */
8019 val = (int32_t)s->pc;
8020 if (insn & (1 << 24)) {
7d1b0095 8021 tmp = tcg_temp_new_i32();
5e3f878a
PB
8022 tcg_gen_movi_i32(tmp, val);
8023 store_reg(s, 14, tmp);
9ee6e8bb 8024 }
534df156
PM
8025 offset = sextract32(insn << 2, 0, 26);
8026 val += offset + 4;
9ee6e8bb
PB
8027 gen_jmp(s, val);
8028 }
8029 break;
8030 case 0xc:
8031 case 0xd:
8032 case 0xe:
8033 /* Coprocessor. */
8034 if (disas_coproc_insn(env, s, insn))
8035 goto illegal_op;
8036 break;
8037 case 0xf:
8038 /* swi */
eaed129d 8039 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
8040 s->is_jmp = DISAS_SWI;
8041 break;
8042 default:
8043 illegal_op:
bc4a0de0 8044 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
8045 break;
8046 }
8047 }
8048}
8049
8050/* Return true if this is a Thumb-2 logical op. */
8051static int
8052thumb2_logic_op(int op)
8053{
8054 return (op < 8);
8055}
8056
8057/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8058 then set condition code flags based on the result of the operation.
8059 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8060 to the high bit of T1.
8061 Returns zero if the opcode is valid. */
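/* Opcodes 5, 6, 7, 9, 12 and 15 are not handled here; the callers treat a
   nonzero return as UNDEF. */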
8062
8063static int
39d5492a
PM
8064gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8065 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
8066{
8067 int logic_cc;
8068
8069 logic_cc = 0;
8070 switch (op) {
8071 case 0: /* and */
396e467c 8072 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
8073 logic_cc = conds;
8074 break;
8075 case 1: /* bic */
f669df27 8076 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
8077 logic_cc = conds;
8078 break;
8079 case 2: /* orr */
396e467c 8080 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
8081 logic_cc = conds;
8082 break;
8083 case 3: /* orn */
29501f1b 8084 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
8085 logic_cc = conds;
8086 break;
8087 case 4: /* eor */
396e467c 8088 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
8089 logic_cc = conds;
8090 break;
8091 case 8: /* add */
8092 if (conds)
72485ec4 8093 gen_add_CC(t0, t0, t1);
9ee6e8bb 8094 else
396e467c 8095 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
8096 break;
8097 case 10: /* adc */
8098 if (conds)
49b4c31e 8099 gen_adc_CC(t0, t0, t1);
9ee6e8bb 8100 else
396e467c 8101 gen_adc(t0, t1);
9ee6e8bb
PB
8102 break;
8103 case 11: /* sbc */
2de68a49
RH
8104 if (conds) {
8105 gen_sbc_CC(t0, t0, t1);
8106 } else {
396e467c 8107 gen_sub_carry(t0, t0, t1);
2de68a49 8108 }
9ee6e8bb
PB
8109 break;
8110 case 13: /* sub */
8111 if (conds)
72485ec4 8112 gen_sub_CC(t0, t0, t1);
9ee6e8bb 8113 else
396e467c 8114 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
8115 break;
8116 case 14: /* rsb */
8117 if (conds)
72485ec4 8118 gen_sub_CC(t0, t1, t0);
9ee6e8bb 8119 else
396e467c 8120 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
8121 break;
8122 default: /* 5, 6, 7, 9, 12, 15. */
8123 return 1;
8124 }
8125 if (logic_cc) {
396e467c 8126 gen_logic_CC(t0);
9ee6e8bb 8127 if (shifter_out)
396e467c 8128 gen_set_CF_bit31(t1);
9ee6e8bb
PB
8129 }
8130 return 0;
8131}
8132
8133/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8134 is not legal. */
0ecb72a5 8135static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 8136{
b0109805 8137 uint32_t insn, imm, shift, offset;
9ee6e8bb 8138 uint32_t rd, rn, rm, rs;
39d5492a
PM
8139 TCGv_i32 tmp;
8140 TCGv_i32 tmp2;
8141 TCGv_i32 tmp3;
8142 TCGv_i32 addr;
a7812ae4 8143 TCGv_i64 tmp64;
9ee6e8bb
PB
8144 int op;
8145 int shiftop;
8146 int conds;
8147 int logic_cc;
8148
8149 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8150 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 8151 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
8152 16-bit instructions to get correct prefetch abort behavior. */
8153 insn = insn_hw1;
8154 if ((insn & (1 << 12)) == 0) {
be5e7a76 8155 ARCH(5);
9ee6e8bb
PB
8156 /* Second half of blx. */
8157 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
8158 tmp = load_reg(s, 14);
8159 tcg_gen_addi_i32(tmp, tmp, offset);
8160 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 8161
7d1b0095 8162 tmp2 = tcg_temp_new_i32();
b0109805 8163 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8164 store_reg(s, 14, tmp2);
8165 gen_bx(s, tmp);
9ee6e8bb
PB
8166 return 0;
8167 }
8168 if (insn & (1 << 11)) {
8169 /* Second half of bl. */
8170 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 8171 tmp = load_reg(s, 14);
6a0d8a1d 8172 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 8173
7d1b0095 8174 tmp2 = tcg_temp_new_i32();
b0109805 8175 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
8176 store_reg(s, 14, tmp2);
8177 gen_bx(s, tmp);
9ee6e8bb
PB
8178 return 0;
8179 }
8180 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8181 /* Instruction spans a page boundary. Implement it as two
8182 16-bit instructions in case the second half causes an
8183 prefetch abort. */
8184 offset = ((int32_t)insn << 21) >> 9;
396e467c 8185 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
8186 return 0;
8187 }
8188 /* Fall through to 32-bit decode. */
8189 }
8190
d31dd73e 8191 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
8192 s->pc += 2;
8193 insn |= (uint32_t)insn_hw1 << 16;
8194
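/* Only the original 16-bit BL/BLX pairs (hw1 = 0b11110..., hw2 = 0b111x1...)
   predate Thumb-2; any other 32-bit encoding requires the 6T2 feature,
   which the check below enforces. */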
8195 if ((insn & 0xf800e800) != 0xf000e800) {
8196 ARCH(6T2);
8197 }
8198
8199 rn = (insn >> 16) & 0xf;
8200 rs = (insn >> 12) & 0xf;
8201 rd = (insn >> 8) & 0xf;
8202 rm = insn & 0xf;
8203 switch ((insn >> 25) & 0xf) {
8204 case 0: case 1: case 2: case 3:
8205 /* 16-bit instructions. Should never happen. */
8206 abort();
8207 case 4:
8208 if (insn & (1 << 22)) {
8209 /* Other load/store, table branch. */
8210 if (insn & 0x01200000) {
8211 /* Load/store doubleword. */
8212 if (rn == 15) {
7d1b0095 8213 addr = tcg_temp_new_i32();
b0109805 8214 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 8215 } else {
b0109805 8216 addr = load_reg(s, rn);
9ee6e8bb
PB
8217 }
8218 offset = (insn & 0xff) * 4;
8219 if ((insn & (1 << 23)) == 0)
8220 offset = -offset;
8221 if (insn & (1 << 24)) {
b0109805 8222 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
8223 offset = 0;
8224 }
8225 if (insn & (1 << 20)) {
8226 /* ldrd */
e2592fad 8227 tmp = tcg_temp_new_i32();
08307563 8228 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805
PB
8229 store_reg(s, rs, tmp);
8230 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8231 tmp = tcg_temp_new_i32();
08307563 8232 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 8233 store_reg(s, rd, tmp);
9ee6e8bb
PB
8234 } else {
8235 /* strd */
b0109805 8236 tmp = load_reg(s, rs);
08307563 8237 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8238 tcg_temp_free_i32(tmp);
b0109805
PB
8239 tcg_gen_addi_i32(addr, addr, 4);
8240 tmp = load_reg(s, rd);
08307563 8241 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8242 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8243 }
8244 if (insn & (1 << 21)) {
8245 /* Base writeback. */
8246 if (rn == 15)
8247 goto illegal_op;
b0109805
PB
8248 tcg_gen_addi_i32(addr, addr, offset - 4);
8249 store_reg(s, rn, addr);
8250 } else {
7d1b0095 8251 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8252 }
8253 } else if ((insn & (1 << 23)) == 0) {
8254 /* Load/store exclusive word. */
39d5492a 8255 addr = tcg_temp_local_new_i32();
98a46317 8256 load_reg_var(s, addr, rn);
426f5abc 8257 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 8258 if (insn & (1 << 20)) {
426f5abc 8259 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 8260 } else {
426f5abc 8261 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 8262 }
39d5492a 8263 tcg_temp_free_i32(addr);
2359bf80 8264 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
8265 /* Table Branch. */
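/* TBB loads a byte at [Rn + Rm], TBH a halfword at [Rn + 2*Rm] (note tmp is
   added twice for tbh below); the loaded value is a forward branch offset
   in halfwords, so it is doubled and added to the PC. */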
8266 if (rn == 15) {
7d1b0095 8267 addr = tcg_temp_new_i32();
b0109805 8268 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 8269 } else {
b0109805 8270 addr = load_reg(s, rn);
9ee6e8bb 8271 }
b26eefb6 8272 tmp = load_reg(s, rm);
b0109805 8273 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
8274 if (insn & (1 << 4)) {
8275 /* tbh */
b0109805 8276 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 8277 tcg_temp_free_i32(tmp);
e2592fad 8278 tmp = tcg_temp_new_i32();
08307563 8279 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9ee6e8bb 8280 } else { /* tbb */
7d1b0095 8281 tcg_temp_free_i32(tmp);
e2592fad 8282 tmp = tcg_temp_new_i32();
08307563 8283 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9ee6e8bb 8284 }
7d1b0095 8285 tcg_temp_free_i32(addr);
b0109805
PB
8286 tcg_gen_shli_i32(tmp, tmp, 1);
8287 tcg_gen_addi_i32(tmp, tmp, s->pc);
8288 store_reg(s, 15, tmp);
9ee6e8bb 8289 } else {
2359bf80 8290 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 8291 op = (insn >> 4) & 0x3;
2359bf80
MR
8292 switch (op2) {
8293 case 0:
426f5abc 8294 goto illegal_op;
2359bf80
MR
8295 case 1:
8296 /* Load/store exclusive byte/halfword/doubleword */
8297 if (op == 2) {
8298 goto illegal_op;
8299 }
8300 ARCH(7);
8301 break;
8302 case 2:
8303 /* Load-acquire/store-release */
8304 if (op == 3) {
8305 goto illegal_op;
8306 }
8307 /* Fall through */
8308 case 3:
8309 /* Load-acquire/store-release exclusive */
8310 ARCH(8);
8311 break;
426f5abc 8312 }
39d5492a 8313 addr = tcg_temp_local_new_i32();
98a46317 8314 load_reg_var(s, addr, rn);
2359bf80
MR
8315 if (!(op2 & 1)) {
8316 if (insn & (1 << 20)) {
8317 tmp = tcg_temp_new_i32();
8318 switch (op) {
8319 case 0: /* ldab */
08307563 8320 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2359bf80
MR
8321 break;
8322 case 1: /* ldah */
08307563 8323 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2359bf80
MR
8324 break;
8325 case 2: /* lda */
08307563 8326 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2359bf80
MR
8327 break;
8328 default:
8329 abort();
8330 }
8331 store_reg(s, rs, tmp);
8332 } else {
8333 tmp = load_reg(s, rs);
8334 switch (op) {
8335 case 0: /* stlb */
08307563 8336 gen_aa32_st8(tmp, addr, IS_USER(s));
2359bf80
MR
8337 break;
8338 case 1: /* stlh */
08307563 8339 gen_aa32_st16(tmp, addr, IS_USER(s));
2359bf80
MR
8340 break;
8341 case 2: /* stl */
08307563 8342 gen_aa32_st32(tmp, addr, IS_USER(s));
2359bf80
MR
8343 break;
8344 default:
8345 abort();
8346 }
8347 tcg_temp_free_i32(tmp);
8348 }
8349 } else if (insn & (1 << 20)) {
426f5abc 8350 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 8351 } else {
426f5abc 8352 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 8353 }
39d5492a 8354 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8355 }
8356 } else {
8357 /* Load/store multiple, RFE, SRS. */
8358 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976
PM
8359 /* RFE, SRS: not available in user mode or on M profile */
8360 if (IS_USER(s) || IS_M(env)) {
9ee6e8bb 8361 goto illegal_op;
00115976 8362 }
9ee6e8bb
PB
8363 if (insn & (1 << 20)) {
8364 /* rfe */
b0109805
PB
8365 addr = load_reg(s, rn);
8366 if ((insn & (1 << 24)) == 0)
8367 tcg_gen_addi_i32(addr, addr, -8);
8368 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 8369 tmp = tcg_temp_new_i32();
08307563 8370 gen_aa32_ld32u(tmp, addr, 0);
b0109805 8371 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 8372 tmp2 = tcg_temp_new_i32();
08307563 8373 gen_aa32_ld32u(tmp2, addr, 0);
9ee6e8bb
PB
8374 if (insn & (1 << 21)) {
8375 /* Base writeback. */
b0109805
PB
8376 if (insn & (1 << 24)) {
8377 tcg_gen_addi_i32(addr, addr, 4);
8378 } else {
8379 tcg_gen_addi_i32(addr, addr, -4);
8380 }
8381 store_reg(s, rn, addr);
8382 } else {
7d1b0095 8383 tcg_temp_free_i32(addr);
9ee6e8bb 8384 }
b0109805 8385 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
8386 } else {
8387 /* srs */
81465888
PM
8388 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8389 insn & (1 << 21));
9ee6e8bb
PB
8390 }
8391 } else {
5856d44e 8392 int i, loaded_base = 0;
39d5492a 8393 TCGv_i32 loaded_var;
9ee6e8bb 8394 /* Load/store multiple. */
b0109805 8395 addr = load_reg(s, rn);
9ee6e8bb
PB
8396 offset = 0;
8397 for (i = 0; i < 16; i++) {
8398 if (insn & (1 << i))
8399 offset += 4;
8400 }
8401 if (insn & (1 << 24)) {
b0109805 8402 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8403 }
8404
39d5492a 8405 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8406 for (i = 0; i < 16; i++) {
8407 if ((insn & (1 << i)) == 0)
8408 continue;
8409 if (insn & (1 << 20)) {
8410 /* Load. */
e2592fad 8411 tmp = tcg_temp_new_i32();
08307563 8412 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9ee6e8bb 8413 if (i == 15) {
b0109805 8414 gen_bx(s, tmp);
5856d44e
YO
8415 } else if (i == rn) {
8416 loaded_var = tmp;
8417 loaded_base = 1;
9ee6e8bb 8418 } else {
b0109805 8419 store_reg(s, i, tmp);
9ee6e8bb
PB
8420 }
8421 } else {
8422 /* Store. */
b0109805 8423 tmp = load_reg(s, i);
08307563 8424 gen_aa32_st32(tmp, addr, IS_USER(s));
e2592fad 8425 tcg_temp_free_i32(tmp);
9ee6e8bb 8426 }
b0109805 8427 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 8428 }
5856d44e
YO
8429 if (loaded_base) {
8430 store_reg(s, rn, loaded_var);
8431 }
9ee6e8bb
PB
8432 if (insn & (1 << 21)) {
8433 /* Base register writeback. */
8434 if (insn & (1 << 24)) {
b0109805 8435 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
8436 }
8437 /* Fault if writeback register is in register list. */
8438 if (insn & (1 << rn))
8439 goto illegal_op;
b0109805
PB
8440 store_reg(s, rn, addr);
8441 } else {
7d1b0095 8442 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8443 }
8444 }
8445 }
8446 break;
2af9ab77
JB
8447 case 5:
8448
9ee6e8bb 8449 op = (insn >> 21) & 0xf;
2af9ab77
JB
8450 if (op == 6) {
8451 /* Halfword pack. */
8452 tmp = load_reg(s, rn);
8453 tmp2 = load_reg(s, rm);
8454 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8455 if (insn & (1 << 5)) {
8456 /* pkhtb */
8457 if (shift == 0)
8458 shift = 31;
8459 tcg_gen_sari_i32(tmp2, tmp2, shift);
8460 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8461 tcg_gen_ext16u_i32(tmp2, tmp2);
8462 } else {
8463 /* pkhbt */
8464 if (shift)
8465 tcg_gen_shli_i32(tmp2, tmp2, shift);
8466 tcg_gen_ext16u_i32(tmp, tmp);
8467 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8468 }
8469 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8470 tcg_temp_free_i32(tmp2);
3174f8e9
FN
8471 store_reg(s, rd, tmp);
8472 } else {
2af9ab77
JB
8473 /* Data processing register constant shift. */
8474 if (rn == 15) {
7d1b0095 8475 tmp = tcg_temp_new_i32();
2af9ab77
JB
8476 tcg_gen_movi_i32(tmp, 0);
8477 } else {
8478 tmp = load_reg(s, rn);
8479 }
8480 tmp2 = load_reg(s, rm);
8481
8482 shiftop = (insn >> 4) & 3;
8483 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8484 conds = (insn & (1 << 20)) != 0;
8485 logic_cc = (conds && thumb2_logic_op(op));
8486 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8487 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8488 goto illegal_op;
7d1b0095 8489 tcg_temp_free_i32(tmp2);
2af9ab77
JB
8490 if (rd != 15) {
8491 store_reg(s, rd, tmp);
8492 } else {
7d1b0095 8493 tcg_temp_free_i32(tmp);
2af9ab77 8494 }
3174f8e9 8495 }
9ee6e8bb
PB
8496 break;
8497 case 13: /* Misc data processing. */
8498 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8499 if (op < 4 && (insn & 0xf000) != 0xf000)
8500 goto illegal_op;
8501 switch (op) {
8502 case 0: /* Register controlled shift. */
8984bd2e
PB
8503 tmp = load_reg(s, rn);
8504 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8505 if ((insn & 0x70) != 0)
8506 goto illegal_op;
8507 op = (insn >> 21) & 3;
8984bd2e
PB
8508 logic_cc = (insn & (1 << 20)) != 0;
8509 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8510 if (logic_cc)
8511 gen_logic_CC(tmp);
21aeb343 8512 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
8513 break;
8514 case 1: /* Sign/zero extend. */
5e3f878a 8515 tmp = load_reg(s, rm);
9ee6e8bb 8516 shift = (insn >> 4) & 3;
1301f322 8517 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8518 rotate; a shift is sufficient. */
8519 if (shift != 0)
f669df27 8520 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8521 op = (insn >> 20) & 7;
8522 switch (op) {
5e3f878a
PB
8523 case 0: gen_sxth(tmp); break;
8524 case 1: gen_uxth(tmp); break;
8525 case 2: gen_sxtb16(tmp); break;
8526 case 3: gen_uxtb16(tmp); break;
8527 case 4: gen_sxtb(tmp); break;
8528 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
8529 default: goto illegal_op;
8530 }
8531 if (rn != 15) {
5e3f878a 8532 tmp2 = load_reg(s, rn);
9ee6e8bb 8533 if ((op >> 1) == 1) {
5e3f878a 8534 gen_add16(tmp, tmp2);
9ee6e8bb 8535 } else {
5e3f878a 8536 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8537 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8538 }
8539 }
5e3f878a 8540 store_reg(s, rd, tmp);
9ee6e8bb
PB
8541 break;
8542 case 2: /* SIMD add/subtract. */
8543 op = (insn >> 20) & 7;
8544 shift = (insn >> 4) & 7;
8545 if ((op & 3) == 3 || (shift & 3) == 3)
8546 goto illegal_op;
6ddbc6e4
PB
8547 tmp = load_reg(s, rn);
8548 tmp2 = load_reg(s, rm);
8549 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 8550 tcg_temp_free_i32(tmp2);
6ddbc6e4 8551 store_reg(s, rd, tmp);
9ee6e8bb
PB
8552 break;
8553 case 3: /* Other data processing. */
8554 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8555 if (op < 4) {
8556 /* Saturating add/subtract. */
d9ba4830
PB
8557 tmp = load_reg(s, rn);
8558 tmp2 = load_reg(s, rm);
9ee6e8bb 8559 if (op & 1)
9ef39277 8560 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 8561 if (op & 2)
9ef39277 8562 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 8563 else
9ef39277 8564 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 8565 tcg_temp_free_i32(tmp2);
9ee6e8bb 8566 } else {
d9ba4830 8567 tmp = load_reg(s, rn);
9ee6e8bb
PB
8568 switch (op) {
8569 case 0x0a: /* rbit */
d9ba4830 8570 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8571 break;
8572 case 0x08: /* rev */
66896cb8 8573 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
8574 break;
8575 case 0x09: /* rev16 */
d9ba4830 8576 gen_rev16(tmp);
9ee6e8bb
PB
8577 break;
8578 case 0x0b: /* revsh */
d9ba4830 8579 gen_revsh(tmp);
9ee6e8bb
PB
8580 break;
8581 case 0x10: /* sel */
d9ba4830 8582 tmp2 = load_reg(s, rm);
7d1b0095 8583 tmp3 = tcg_temp_new_i32();
0ecb72a5 8584 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 8585 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8586 tcg_temp_free_i32(tmp3);
8587 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8588 break;
8589 case 0x18: /* clz */
d9ba4830 8590 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
8591 break;
8592 default:
8593 goto illegal_op;
8594 }
8595 }
d9ba4830 8596 store_reg(s, rd, tmp);
9ee6e8bb
PB
8597 break;
8598 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8599 op = (insn >> 4) & 0xf;
d9ba4830
PB
8600 tmp = load_reg(s, rn);
8601 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8602 switch ((insn >> 20) & 7) {
8603 case 0: /* 32 x 32 -> 32 */
d9ba4830 8604 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8605 tcg_temp_free_i32(tmp2);
9ee6e8bb 8606 if (rs != 15) {
d9ba4830 8607 tmp2 = load_reg(s, rs);
9ee6e8bb 8608 if (op)
d9ba4830 8609 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 8610 else
d9ba4830 8611 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8612 tcg_temp_free_i32(tmp2);
9ee6e8bb 8613 }
9ee6e8bb
PB
8614 break;
8615 case 1: /* 16 x 16 -> 32 */
d9ba4830 8616 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8617 tcg_temp_free_i32(tmp2);
9ee6e8bb 8618 if (rs != 15) {
d9ba4830 8619 tmp2 = load_reg(s, rs);
9ef39277 8620 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8621 tcg_temp_free_i32(tmp2);
9ee6e8bb 8622 }
9ee6e8bb
PB
8623 break;
8624 case 2: /* Dual multiply add. */
8625 case 4: /* Dual multiply subtract. */
8626 if (op)
d9ba4830
PB
8627 gen_swap_half(tmp2);
8628 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8629 if (insn & (1 << 22)) {
e1d177b9 8630 /* This subtraction cannot overflow. */
d9ba4830 8631 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 8632 } else {
e1d177b9
PM
8633 /* This addition cannot overflow 32 bits;
8634 * however it may overflow considered as a signed
8635 * operation, in which case we must set the Q flag.
8636 */
9ef39277 8637 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8638 }
7d1b0095 8639 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8640 if (rs != 15)
8641 {
d9ba4830 8642 tmp2 = load_reg(s, rs);
9ef39277 8643 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8644 tcg_temp_free_i32(tmp2);
9ee6e8bb 8645 }
9ee6e8bb
PB
8646 break;
8647 case 3: /* 32 * 16 -> 32msb */
8648 if (op)
d9ba4830 8649 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8650 else
d9ba4830 8651 gen_sxth(tmp2);
a7812ae4
PB
8652 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8653 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8654 tmp = tcg_temp_new_i32();
a7812ae4 8655 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8656 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8657 if (rs != 15)
8658 {
d9ba4830 8659 tmp2 = load_reg(s, rs);
9ef39277 8660 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8661 tcg_temp_free_i32(tmp2);
9ee6e8bb 8662 }
9ee6e8bb 8663 break;
838fa72d
AJ
8664 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8665 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8666 if (rs != 15) {
838fa72d
AJ
8667 tmp = load_reg(s, rs);
8668 if (insn & (1 << 20)) {
8669 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 8670 } else {
838fa72d 8671 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 8672 }
2c0262af 8673 }
838fa72d
AJ
8674 if (insn & (1 << 4)) {
8675 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8676 }
8677 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8678 tmp = tcg_temp_new_i32();
838fa72d
AJ
8679 tcg_gen_trunc_i64_i32(tmp, tmp64);
8680 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8681 break;
8682 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 8683 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8684 tcg_temp_free_i32(tmp2);
9ee6e8bb 8685 if (rs != 15) {
d9ba4830
PB
8686 tmp2 = load_reg(s, rs);
8687 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8688 tcg_temp_free_i32(tmp2);
5fd46862 8689 }
9ee6e8bb 8690 break;
2c0262af 8691 }
d9ba4830 8692 store_reg(s, rd, tmp);
2c0262af 8693 break;
9ee6e8bb
PB
8694 case 6: case 7: /* 64-bit multiply, Divide. */
8695 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
8696 tmp = load_reg(s, rn);
8697 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8698 if ((op & 0x50) == 0x10) {
8699 /* sdiv, udiv */
47789990 8700 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 8701 goto illegal_op;
47789990 8702 }
9ee6e8bb 8703 if (op & 0x20)
5e3f878a 8704 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 8705 else
5e3f878a 8706 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 8707 tcg_temp_free_i32(tmp2);
5e3f878a 8708 store_reg(s, rd, tmp);
9ee6e8bb
PB
8709 } else if ((op & 0xe) == 0xc) {
8710 /* Dual multiply accumulate long. */
8711 if (op & 1)
5e3f878a
PB
8712 gen_swap_half(tmp2);
8713 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8714 if (op & 0x10) {
5e3f878a 8715 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 8716 } else {
5e3f878a 8717 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 8718 }
7d1b0095 8719 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8720 /* BUGFIX */
8721 tmp64 = tcg_temp_new_i64();
8722 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8723 tcg_temp_free_i32(tmp);
a7812ae4
PB
8724 gen_addq(s, tmp64, rs, rd);
8725 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8726 tcg_temp_free_i64(tmp64);
2c0262af 8727 } else {
9ee6e8bb
PB
8728 if (op & 0x20) {
8729 /* Unsigned 64-bit multiply */
a7812ae4 8730 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 8731 } else {
9ee6e8bb
PB
8732 if (op & 8) {
8733 /* smlalxy */
5e3f878a 8734 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 8735 tcg_temp_free_i32(tmp2);
a7812ae4
PB
8736 tmp64 = tcg_temp_new_i64();
8737 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8738 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8739 } else {
8740 /* Signed 64-bit multiply */
a7812ae4 8741 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 8742 }
b5ff1b31 8743 }
9ee6e8bb
PB
8744 if (op & 4) {
8745 /* umaal */
a7812ae4
PB
8746 gen_addq_lo(s, tmp64, rs);
8747 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
8748 } else if (op & 0x40) {
8749 /* 64-bit accumulate. */
a7812ae4 8750 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 8751 }
a7812ae4 8752 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 8753 tcg_temp_free_i64(tmp64);
5fd46862 8754 }
2c0262af 8755 break;
9ee6e8bb
PB
8756 }
8757 break;
8758 case 6: case 7: case 14: case 15:
8759 /* Coprocessor. */
8760 if (((insn >> 24) & 3) == 3) {
8761 /* Translate into the equivalent ARM encoding. */
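/* Roughly: the T32 Advanced SIMD encoding (insn[31:24] = 111U 1111) is
   rewritten into the A32 one (1111 001U) by moving the U bit from bit 28
   down to bit 24, clearing bits [27:26] and forcing bit 28 to 1, so that
   disas_neon_data_insn() can share a single decoder. */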
f06053e3 8762 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
9ee6e8bb
PB
8763 if (disas_neon_data_insn(env, s, insn))
8764 goto illegal_op;
8765 } else {
8766 if (insn & (1 << 28))
8767 goto illegal_op;
8768 if (disas_coproc_insn (env, s, insn))
8769 goto illegal_op;
8770 }
8771 break;
8772 case 8: case 9: case 10: case 11:
8773 if (insn & (1 << 15)) {
8774 /* Branches, misc control. */
8775 if (insn & 0x5000) {
8776 /* Unconditional branch. */
8777 /* signextend(hw1[10:0]) -> offset[:12]. */
8778 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8779 /* hw1[10:0] -> offset[11:1]. */
8780 offset |= (insn & 0x7ff) << 1;
8781 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8782 offset[24:22] already have the same value because of the
8783 sign extension above. */
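/* Per the T4 branch encoding, I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S), with
   J1 = hw2[13] and J2 = hw2[11]; since offset[23:22] currently hold copies
   of S, XORing them with ~J1/~J2 below yields I1 and I2. */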
8784 offset ^= ((~insn) & (1 << 13)) << 10;
8785 offset ^= ((~insn) & (1 << 11)) << 11;
8786
9ee6e8bb
PB
8787 if (insn & (1 << 14)) {
8788 /* Branch and link. */
3174f8e9 8789 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 8790 }
3b46e624 8791
b0109805 8792 offset += s->pc;
9ee6e8bb
PB
8793 if (insn & (1 << 12)) {
8794 /* b/bl */
b0109805 8795 gen_jmp(s, offset);
9ee6e8bb
PB
8796 } else {
8797 /* blx */
b0109805 8798 offset &= ~(uint32_t)2;
be5e7a76 8799 /* thumb2 bx, no need to check */
b0109805 8800 gen_bx_im(s, offset);
2c0262af 8801 }
9ee6e8bb
PB
8802 } else if (((insn >> 23) & 7) == 7) {
8803 /* Misc control */
8804 if (insn & (1 << 13))
8805 goto illegal_op;
8806
8807 if (insn & (1 << 26)) {
8808 /* Secure monitor call (v6Z) */
e0c270d9
SW
8809 qemu_log_mask(LOG_UNIMP,
8810 "arm: unimplemented secure monitor call\n");
9ee6e8bb 8811 goto illegal_op; /* not implemented. */
2c0262af 8812 } else {
9ee6e8bb
PB
8813 op = (insn >> 20) & 7;
8814 switch (op) {
8815 case 0: /* msr cpsr. */
8816 if (IS_M(env)) {
8984bd2e
PB
8817 tmp = load_reg(s, rn);
8818 addr = tcg_const_i32(insn & 0xff);
8819 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 8820 tcg_temp_free_i32(addr);
7d1b0095 8821 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8822 gen_lookup_tb(s);
8823 break;
8824 }
8825 /* fall through */
8826 case 1: /* msr spsr. */
8827 if (IS_M(env))
8828 goto illegal_op;
2fbac54b
FN
8829 tmp = load_reg(s, rn);
8830 if (gen_set_psr(s,
9ee6e8bb 8831 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
2fbac54b 8832 op == 1, tmp))
9ee6e8bb
PB
8833 goto illegal_op;
8834 break;
8835 case 2: /* cps, nop-hint. */
8836 if (((insn >> 8) & 7) == 0) {
8837 gen_nop_hint(s, insn & 0xff);
8838 }
8839 /* Implemented as NOP in user mode. */
8840 if (IS_USER(s))
8841 break;
8842 offset = 0;
8843 imm = 0;
8844 if (insn & (1 << 10)) {
8845 if (insn & (1 << 7))
8846 offset |= CPSR_A;
8847 if (insn & (1 << 6))
8848 offset |= CPSR_I;
8849 if (insn & (1 << 5))
8850 offset |= CPSR_F;
8851 if (insn & (1 << 9))
8852 imm = CPSR_A | CPSR_I | CPSR_F;
8853 }
8854 if (insn & (1 << 8)) {
8855 offset |= 0x1f;
8856 imm |= (insn & 0x1f);
8857 }
8858 if (offset) {
2fbac54b 8859 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
8860 }
8861 break;
8862 case 3: /* Special control operations. */
426f5abc 8863 ARCH(7);
9ee6e8bb
PB
8864 op = (insn >> 4) & 0xf;
8865 switch (op) {
8866 case 2: /* clrex */
426f5abc 8867 gen_clrex(s);
9ee6e8bb
PB
8868 break;
8869 case 4: /* dsb */
8870 case 5: /* dmb */
8871 case 6: /* isb */
8872 /* These execute as NOPs. */
9ee6e8bb
PB
8873 break;
8874 default:
8875 goto illegal_op;
8876 }
8877 break;
8878 case 4: /* bxj */
8879 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
8880 tmp = load_reg(s, rn);
8881 gen_bx(s, tmp);
9ee6e8bb
PB
8882 break;
8883 case 5: /* Exception return. */
b8b45b68
RV
8884 if (IS_USER(s)) {
8885 goto illegal_op;
8886 }
8887 if (rn != 14 || rd != 15) {
8888 goto illegal_op;
8889 }
8890 tmp = load_reg(s, rn);
8891 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8892 gen_exception_return(s, tmp);
8893 break;
9ee6e8bb 8894 case 6: /* mrs cpsr. */
7d1b0095 8895 tmp = tcg_temp_new_i32();
9ee6e8bb 8896 if (IS_M(env)) {
8984bd2e
PB
8897 addr = tcg_const_i32(insn & 0xff);
8898 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 8899 tcg_temp_free_i32(addr);
9ee6e8bb 8900 } else {
9ef39277 8901 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 8902 }
8984bd2e 8903 store_reg(s, rd, tmp);
9ee6e8bb
PB
8904 break;
8905 case 7: /* mrs spsr. */
8906 /* Not accessible in user mode. */
8907 if (IS_USER(s) || IS_M(env))
8908 goto illegal_op;
d9ba4830
PB
8909 tmp = load_cpu_field(spsr);
8910 store_reg(s, rd, tmp);
9ee6e8bb 8911 break;
2c0262af
FB
8912 }
8913 }
9ee6e8bb
PB
8914 } else {
8915 /* Conditional branch. */
8916 op = (insn >> 22) & 0xf;
8917 /* Generate a conditional jump to next instruction. */
8918 s->condlabel = gen_new_label();
d9ba4830 8919 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
8920 s->condjmp = 1;
8921
8922 /* offset[11:1] = insn[10:0] */
8923 offset = (insn & 0x7ff) << 1;
8924 /* offset[17:12] = insn[21:16]. */
8925 offset |= (insn & 0x003f0000) >> 4;
8926 /* offset[31:20] = insn[26]. */
8927 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8928 /* offset[18] = insn[13]. */
8929 offset |= (insn & (1 << 13)) << 5;
8930 /* offset[19] = insn[11]. */
8931 offset |= (insn & (1 << 11)) << 8;
8932
8933 /* jump to the offset */
b0109805 8934 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
8935 }
8936 } else {
8937 /* Data processing immediate. */
8938 if (insn & (1 << 25)) {
8939 if (insn & (1 << 24)) {
8940 if (insn & (1 << 20))
8941 goto illegal_op;
8942 /* Bitfield/Saturate. */
8943 op = (insn >> 21) & 7;
8944 imm = insn & 0x1f;
8945 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 8946 if (rn == 15) {
7d1b0095 8947 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
8948 tcg_gen_movi_i32(tmp, 0);
8949 } else {
8950 tmp = load_reg(s, rn);
8951 }
9ee6e8bb
PB
8952 switch (op) {
8953 case 2: /* Signed bitfield extract. */
8954 imm++;
8955 if (shift + imm > 32)
8956 goto illegal_op;
8957 if (imm < 32)
6ddbc6e4 8958 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
8959 break;
8960 case 6: /* Unsigned bitfield extract. */
8961 imm++;
8962 if (shift + imm > 32)
8963 goto illegal_op;
8964 if (imm < 32)
6ddbc6e4 8965 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
8966 break;
8967 case 3: /* Bitfield insert/clear. */
8968 if (imm < shift)
8969 goto illegal_op;
8970 imm = imm + 1 - shift;
8971 if (imm != 32) {
6ddbc6e4 8972 tmp2 = load_reg(s, rd);
d593c48e 8973 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 8974 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8975 }
8976 break;
8977 case 7:
8978 goto illegal_op;
8979 default: /* Saturate. */
9ee6e8bb
PB
8980 if (shift) {
8981 if (op & 1)
6ddbc6e4 8982 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8983 else
6ddbc6e4 8984 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 8985 }
6ddbc6e4 8986 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
8987 if (op & 4) {
8988 /* Unsigned. */
9ee6e8bb 8989 if ((op & 1) && shift == 0)
9ef39277 8990 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8991 else
9ef39277 8992 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 8993 } else {
9ee6e8bb 8994 /* Signed. */
9ee6e8bb 8995 if ((op & 1) && shift == 0)
9ef39277 8996 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 8997 else
9ef39277 8998 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 8999 }
b75263d6 9000 tcg_temp_free_i32(tmp2);
9ee6e8bb 9001 break;
2c0262af 9002 }
6ddbc6e4 9003 store_reg(s, rd, tmp);
9ee6e8bb
PB
9004 } else {
9005 imm = ((insn & 0x04000000) >> 15)
9006 | ((insn & 0x7000) >> 4) | (insn & 0xff);
9007 if (insn & (1 << 22)) {
9008 /* 16-bit immediate. */
9009 imm |= (insn >> 4) & 0xf000;
9010 if (insn & (1 << 23)) {
9011 /* movt */
5e3f878a 9012 tmp = load_reg(s, rd);
86831435 9013 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 9014 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 9015 } else {
9ee6e8bb 9016 /* movw */
7d1b0095 9017 tmp = tcg_temp_new_i32();
5e3f878a 9018 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
9019 }
9020 } else {
9ee6e8bb
PB
9021 /* Add/sub 12-bit immediate. */
9022 if (rn == 15) {
b0109805 9023 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 9024 if (insn & (1 << 23))
b0109805 9025 offset -= imm;
9ee6e8bb 9026 else
b0109805 9027 offset += imm;
7d1b0095 9028 tmp = tcg_temp_new_i32();
5e3f878a 9029 tcg_gen_movi_i32(tmp, offset);
2c0262af 9030 } else {
5e3f878a 9031 tmp = load_reg(s, rn);
9ee6e8bb 9032 if (insn & (1 << 23))
5e3f878a 9033 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 9034 else
5e3f878a 9035 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 9036 }
9ee6e8bb 9037 }
5e3f878a 9038 store_reg(s, rd, tmp);
191abaa2 9039 }
9ee6e8bb
PB
9040 } else {
9041 int shifter_out = 0;
9042 /* modified 12-bit immediate. */
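/* Expansion of the modified immediate (a sketch of the cases below, using
 * imm8 = 0xAB as an example):
 *   shift field 0: 0x000000AB
 *   shift field 1: 0x00AB00AB
 *   shift field 2: 0xAB00AB00
 *   shift field 3: 0xABABABAB
 *   shift field 4..15: the 8-bit value with bit 7 forced to 1, rotated
 *   right by (shift:imm8[7]); e.g. shift = 4, imm8 = 0x55 gives
 *   ROR(0xD5, 8) = 0xD5000000. */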
9043 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9044 imm = (insn & 0xff);
9045 switch (shift) {
9046 case 0: /* XY */
9047 /* Nothing to do. */
9048 break;
9049 case 1: /* 00XY00XY */
9050 imm |= imm << 16;
9051 break;
9052 case 2: /* XY00XY00 */
9053 imm |= imm << 16;
9054 imm <<= 8;
9055 break;
9056 case 3: /* XYXYXYXY */
9057 imm |= imm << 16;
9058 imm |= imm << 8;
9059 break;
9060 default: /* Rotated constant. */
9061 shift = (shift << 1) | (imm >> 7);
9062 imm |= 0x80;
9063 imm = imm << (32 - shift);
9064 shifter_out = 1;
9065 break;
b5ff1b31 9066 }
7d1b0095 9067 tmp2 = tcg_temp_new_i32();
3174f8e9 9068 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 9069 rn = (insn >> 16) & 0xf;
3174f8e9 9070 if (rn == 15) {
7d1b0095 9071 tmp = tcg_temp_new_i32();
3174f8e9
FN
9072 tcg_gen_movi_i32(tmp, 0);
9073 } else {
9074 tmp = load_reg(s, rn);
9075 }
9ee6e8bb
PB
9076 op = (insn >> 21) & 0xf;
9077 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 9078 shifter_out, tmp, tmp2))
9ee6e8bb 9079 goto illegal_op;
7d1b0095 9080 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9081 rd = (insn >> 8) & 0xf;
9082 if (rd != 15) {
3174f8e9
FN
9083 store_reg(s, rd, tmp);
9084 } else {
7d1b0095 9085 tcg_temp_free_i32(tmp);
2c0262af 9086 }
2c0262af 9087 }
9ee6e8bb
PB
9088 }
9089 break;
9090 case 12: /* Load/store single data item. */
9091 {
9092 int postinc = 0;
9093 int writeback = 0;
b0109805 9094 int user;
9ee6e8bb
PB
9095 if ((insn & 0x01100000) == 0x01000000) {
9096 if (disas_neon_ls_insn(env, s, insn))
c1713132 9097 goto illegal_op;
9ee6e8bb
PB
9098 break;
9099 }
a2fdc890
PM
9100 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
9101 if (rs == 15) {
9102 if (!(insn & (1 << 20))) {
9103 goto illegal_op;
9104 }
9105 if (op != 2) {
 9106 /* Byte or halfword load space with dest == r15: memory hints.
9107 * Catch them early so we don't emit pointless addressing code.
9108 * This space is a mix of:
9109 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9110 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9111 * cores)
9112 * unallocated hints, which must be treated as NOPs
9113 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9114 * which is easiest for the decoding logic
9115 * Some space which must UNDEF
9116 */
9117 int op1 = (insn >> 23) & 3;
9118 int op2 = (insn >> 6) & 0x3f;
9119 if (op & 2) {
9120 goto illegal_op;
9121 }
9122 if (rn == 15) {
02afbf64
PM
9123 /* UNPREDICTABLE, unallocated hint or
9124 * PLD/PLDW/PLI (literal)
9125 */
a2fdc890
PM
9126 return 0;
9127 }
9128 if (op1 & 1) {
02afbf64 9129 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9130 }
9131 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 9132 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
9133 }
9134 /* UNDEF space, or an UNPREDICTABLE */
9135 return 1;
9136 }
9137 }
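            /* To summarise the hint handling above: returning 0 makes the
             * whole insn a NOP -- the PLD/PLDW/PLI and unallocated hint
             * encodings -- while returning 1 sends it to the caller's
             * undef path (EXCP_UDEF).
             */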
b0109805 9138 user = IS_USER(s);
9ee6e8bb 9139 if (rn == 15) {
7d1b0095 9140 addr = tcg_temp_new_i32();
9ee6e8bb
PB
9141 /* PC relative. */
9142 /* s->pc has already been incremented by 4. */
9143 imm = s->pc & 0xfffffffc;
9144 if (insn & (1 << 23))
9145 imm += insn & 0xfff;
9146 else
9147 imm -= insn & 0xfff;
b0109805 9148 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 9149 } else {
b0109805 9150 addr = load_reg(s, rn);
9ee6e8bb
PB
9151 if (insn & (1 << 23)) {
9152 /* Positive offset. */
9153 imm = insn & 0xfff;
b0109805 9154 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 9155 } else {
9ee6e8bb 9156 imm = insn & 0xff;
2a0308c5
PM
9157 switch ((insn >> 8) & 0xf) {
9158 case 0x0: /* Shifted Register. */
9ee6e8bb 9159 shift = (insn >> 4) & 0xf;
2a0308c5
PM
9160 if (shift > 3) {
9161 tcg_temp_free_i32(addr);
18c9b560 9162 goto illegal_op;
2a0308c5 9163 }
b26eefb6 9164 tmp = load_reg(s, rm);
9ee6e8bb 9165 if (shift)
b26eefb6 9166 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 9167 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9168 tcg_temp_free_i32(tmp);
9ee6e8bb 9169 break;
2a0308c5 9170 case 0xc: /* Negative offset. */
b0109805 9171 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 9172 break;
2a0308c5 9173 case 0xe: /* User privilege. */
b0109805
PB
9174 tcg_gen_addi_i32(addr, addr, imm);
9175 user = 1;
9ee6e8bb 9176 break;
2a0308c5 9177 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
9178 imm = -imm;
9179 /* Fall through. */
2a0308c5 9180 case 0xb: /* Post-increment. */
9ee6e8bb
PB
9181 postinc = 1;
9182 writeback = 1;
9183 break;
2a0308c5 9184 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
9185 imm = -imm;
9186 /* Fall through. */
2a0308c5 9187 case 0xf: /* Pre-increment. */
b0109805 9188 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
9189 writeback = 1;
9190 break;
9191 default:
2a0308c5 9192 tcg_temp_free_i32(addr);
b7bcbe95 9193 goto illegal_op;
9ee6e8bb
PB
9194 }
9195 }
9196 }
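            /* The case values above are bits [11:8] of the 8-bit-immediate
             * T32 load/store forms (roughly a 1:P:U:W pattern): 0xc and 0xe
             * are the negative-offset and unprivileged accesses, 0x9/0xb are
             * post-indexed and 0xd/0xf pre-indexed with writeback.  The
             * positive 12-bit offset form was already handled via bit 23.
             */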
9ee6e8bb
PB
9197 if (insn & (1 << 20)) {
9198 /* Load. */
5a839c0d 9199 tmp = tcg_temp_new_i32();
a2fdc890 9200 switch (op) {
5a839c0d 9201 case 0:
08307563 9202 gen_aa32_ld8u(tmp, addr, user);
5a839c0d
PM
9203 break;
9204 case 4:
08307563 9205 gen_aa32_ld8s(tmp, addr, user);
5a839c0d
PM
9206 break;
9207 case 1:
08307563 9208 gen_aa32_ld16u(tmp, addr, user);
5a839c0d
PM
9209 break;
9210 case 5:
08307563 9211 gen_aa32_ld16s(tmp, addr, user);
5a839c0d
PM
9212 break;
9213 case 2:
08307563 9214 gen_aa32_ld32u(tmp, addr, user);
5a839c0d 9215 break;
2a0308c5 9216 default:
5a839c0d 9217 tcg_temp_free_i32(tmp);
2a0308c5
PM
9218 tcg_temp_free_i32(addr);
9219 goto illegal_op;
a2fdc890
PM
9220 }
9221 if (rs == 15) {
9222 gen_bx(s, tmp);
9ee6e8bb 9223 } else {
a2fdc890 9224 store_reg(s, rs, tmp);
9ee6e8bb
PB
9225 }
9226 } else {
9227 /* Store. */
b0109805 9228 tmp = load_reg(s, rs);
9ee6e8bb 9229 switch (op) {
5a839c0d 9230 case 0:
08307563 9231 gen_aa32_st8(tmp, addr, user);
5a839c0d
PM
9232 break;
9233 case 1:
08307563 9234 gen_aa32_st16(tmp, addr, user);
5a839c0d
PM
9235 break;
9236 case 2:
08307563 9237 gen_aa32_st32(tmp, addr, user);
5a839c0d 9238 break;
2a0308c5 9239 default:
5a839c0d 9240 tcg_temp_free_i32(tmp);
2a0308c5
PM
9241 tcg_temp_free_i32(addr);
9242 goto illegal_op;
b7bcbe95 9243 }
5a839c0d 9244 tcg_temp_free_i32(tmp);
2c0262af 9245 }
9ee6e8bb 9246 if (postinc)
b0109805
PB
9247 tcg_gen_addi_i32(addr, addr, imm);
9248 if (writeback) {
9249 store_reg(s, rn, addr);
9250 } else {
7d1b0095 9251 tcg_temp_free_i32(addr);
b0109805 9252 }
9ee6e8bb
PB
9253 }
9254 break;
9255 default:
9256 goto illegal_op;
2c0262af 9257 }
9ee6e8bb
PB
9258 return 0;
9259illegal_op:
9260 return 1;
2c0262af
FB
9261}
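/* disas_thumb2_insn() returns 0 once it has emitted code for the insn and 1
 * when the encoding must UNDEF; disas_thumb_insn() below maps a non-zero
 * return onto gen_exception_insn(s, 4, EXCP_UDEF) via its undef32 label.
 */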
9262
0ecb72a5 9263static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
9264{
9265 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9266 int32_t offset;
9267 int i;
39d5492a
PM
9268 TCGv_i32 tmp;
9269 TCGv_i32 tmp2;
9270 TCGv_i32 addr;
99c475ab 9271
9ee6e8bb
PB
9272 if (s->condexec_mask) {
9273 cond = s->condexec_cond;
bedd2912
JB
 9274 if (cond != 0x0e) { /* No conditional skip needed when the condition is AL. */
9275 s->condlabel = gen_new_label();
9276 gen_test_cc(cond ^ 1, s->condlabel);
9277 s->condjmp = 1;
9278 }
9ee6e8bb
PB
9279 }
9280
d31dd73e 9281 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 9282 s->pc += 2;
b5ff1b31 9283
99c475ab
FB
9284 switch (insn >> 12) {
9285 case 0: case 1:
396e467c 9286
99c475ab
FB
9287 rd = insn & 7;
9288 op = (insn >> 11) & 3;
9289 if (op == 3) {
9290 /* add/subtract */
9291 rn = (insn >> 3) & 7;
396e467c 9292 tmp = load_reg(s, rn);
99c475ab
FB
9293 if (insn & (1 << 10)) {
9294 /* immediate */
7d1b0095 9295 tmp2 = tcg_temp_new_i32();
396e467c 9296 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
9297 } else {
9298 /* reg */
9299 rm = (insn >> 6) & 7;
396e467c 9300 tmp2 = load_reg(s, rm);
99c475ab 9301 }
9ee6e8bb
PB
9302 if (insn & (1 << 9)) {
9303 if (s->condexec_mask)
396e467c 9304 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9305 else
72485ec4 9306 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
9307 } else {
9308 if (s->condexec_mask)
396e467c 9309 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 9310 else
72485ec4 9311 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 9312 }
7d1b0095 9313 tcg_temp_free_i32(tmp2);
396e467c 9314 store_reg(s, rd, tmp);
99c475ab
FB
9315 } else {
9316 /* shift immediate */
9317 rm = (insn >> 3) & 7;
9318 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
9319 tmp = load_reg(s, rm);
9320 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9321 if (!s->condexec_mask)
9322 gen_logic_CC(tmp);
9323 store_reg(s, rd, tmp);
99c475ab
FB
9324 }
9325 break;
9326 case 2: case 3:
9327 /* arithmetic large immediate */
9328 op = (insn >> 11) & 3;
9329 rd = (insn >> 8) & 0x7;
396e467c 9330 if (op == 0) { /* mov */
7d1b0095 9331 tmp = tcg_temp_new_i32();
396e467c 9332 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 9333 if (!s->condexec_mask)
396e467c
FN
9334 gen_logic_CC(tmp);
9335 store_reg(s, rd, tmp);
9336 } else {
9337 tmp = load_reg(s, rd);
7d1b0095 9338 tmp2 = tcg_temp_new_i32();
396e467c
FN
9339 tcg_gen_movi_i32(tmp2, insn & 0xff);
9340 switch (op) {
9341 case 1: /* cmp */
72485ec4 9342 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9343 tcg_temp_free_i32(tmp);
9344 tcg_temp_free_i32(tmp2);
396e467c
FN
9345 break;
9346 case 2: /* add */
9347 if (s->condexec_mask)
9348 tcg_gen_add_i32(tmp, tmp, tmp2);
9349 else
72485ec4 9350 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 9351 tcg_temp_free_i32(tmp2);
396e467c
FN
9352 store_reg(s, rd, tmp);
9353 break;
9354 case 3: /* sub */
9355 if (s->condexec_mask)
9356 tcg_gen_sub_i32(tmp, tmp, tmp2);
9357 else
72485ec4 9358 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 9359 tcg_temp_free_i32(tmp2);
396e467c
FN
9360 store_reg(s, rd, tmp);
9361 break;
9362 }
99c475ab 9363 }
99c475ab
FB
9364 break;
9365 case 4:
9366 if (insn & (1 << 11)) {
9367 rd = (insn >> 8) & 7;
5899f386
FB
9368 /* load pc-relative. Bit 1 of PC is ignored. */
9369 val = s->pc + 2 + ((insn & 0xff) * 4);
9370 val &= ~(uint32_t)2;
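            /* s->pc already points past this 16-bit insn, so s->pc + 2 is
             * the architectural PC (insn address + 4); masking bit 1 gives
             * Align(PC, 4).  E.g. for a literal load at 0x8002 with imm8 = 1
             * the load address is (0x8004 + 2 + 4) & ~2 = 0x8008.
             */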
7d1b0095 9371 addr = tcg_temp_new_i32();
b0109805 9372 tcg_gen_movi_i32(addr, val);
c40c8556 9373 tmp = tcg_temp_new_i32();
08307563 9374 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7d1b0095 9375 tcg_temp_free_i32(addr);
b0109805 9376 store_reg(s, rd, tmp);
99c475ab
FB
9377 break;
9378 }
9379 if (insn & (1 << 10)) {
9380 /* data processing extended or blx */
9381 rd = (insn & 7) | ((insn >> 4) & 8);
9382 rm = (insn >> 3) & 0xf;
9383 op = (insn >> 8) & 3;
9384 switch (op) {
9385 case 0: /* add */
396e467c
FN
9386 tmp = load_reg(s, rd);
9387 tmp2 = load_reg(s, rm);
9388 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9389 tcg_temp_free_i32(tmp2);
396e467c 9390 store_reg(s, rd, tmp);
99c475ab
FB
9391 break;
9392 case 1: /* cmp */
396e467c
FN
9393 tmp = load_reg(s, rd);
9394 tmp2 = load_reg(s, rm);
72485ec4 9395 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
9396 tcg_temp_free_i32(tmp2);
9397 tcg_temp_free_i32(tmp);
99c475ab
FB
9398 break;
9399 case 2: /* mov/cpy */
396e467c
FN
9400 tmp = load_reg(s, rm);
9401 store_reg(s, rd, tmp);
99c475ab
FB
9402 break;
9403 case 3:/* branch [and link] exchange thumb register */
b0109805 9404 tmp = load_reg(s, rm);
99c475ab 9405 if (insn & (1 << 7)) {
be5e7a76 9406 ARCH(5);
99c475ab 9407 val = (uint32_t)s->pc | 1;
7d1b0095 9408 tmp2 = tcg_temp_new_i32();
b0109805
PB
9409 tcg_gen_movi_i32(tmp2, val);
9410 store_reg(s, 14, tmp2);
99c475ab 9411 }
be5e7a76 9412 /* already thumb, no need to check */
d9ba4830 9413 gen_bx(s, tmp);
99c475ab
FB
9414 break;
9415 }
9416 break;
9417 }
9418
9419 /* data processing register */
9420 rd = insn & 7;
9421 rm = (insn >> 3) & 7;
9422 op = (insn >> 6) & 0xf;
9423 if (op == 2 || op == 3 || op == 4 || op == 7) {
9424 /* the shift/rotate ops want the operands backwards */
9425 val = rm;
9426 rm = rd;
9427 rd = val;
9428 val = 1;
9429 } else {
9430 val = 0;
9431 }
9432
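        /* For lsl/lsr/asr/ror the encoding puts the value to shift in rd and
         * the amount in the rm field, but the helpers below operate on tmp2
         * (loaded from rm).  The swap above plus val = 1 makes the shifted
         * value end up in tmp2 and get written back to the original rd; mvn
         * uses the same val = 1 path.  tst/cmp/cmn set rd = 16 further down,
         * meaning "flags only, discard the result".
         */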
396e467c 9433 if (op == 9) { /* neg */
7d1b0095 9434 tmp = tcg_temp_new_i32();
396e467c
FN
9435 tcg_gen_movi_i32(tmp, 0);
9436 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9437 tmp = load_reg(s, rd);
9438 } else {
39d5492a 9439 TCGV_UNUSED_I32(tmp);
396e467c 9440 }
99c475ab 9441
396e467c 9442 tmp2 = load_reg(s, rm);
5899f386 9443 switch (op) {
99c475ab 9444 case 0x0: /* and */
396e467c 9445 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 9446 if (!s->condexec_mask)
396e467c 9447 gen_logic_CC(tmp);
99c475ab
FB
9448 break;
9449 case 0x1: /* eor */
396e467c 9450 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 9451 if (!s->condexec_mask)
396e467c 9452 gen_logic_CC(tmp);
99c475ab
FB
9453 break;
9454 case 0x2: /* lsl */
9ee6e8bb 9455 if (s->condexec_mask) {
365af80e 9456 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 9457 } else {
9ef39277 9458 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9459 gen_logic_CC(tmp2);
9ee6e8bb 9460 }
99c475ab
FB
9461 break;
9462 case 0x3: /* lsr */
9ee6e8bb 9463 if (s->condexec_mask) {
365af80e 9464 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 9465 } else {
9ef39277 9466 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9467 gen_logic_CC(tmp2);
9ee6e8bb 9468 }
99c475ab
FB
9469 break;
9470 case 0x4: /* asr */
9ee6e8bb 9471 if (s->condexec_mask) {
365af80e 9472 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 9473 } else {
9ef39277 9474 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9475 gen_logic_CC(tmp2);
9ee6e8bb 9476 }
99c475ab
FB
9477 break;
9478 case 0x5: /* adc */
49b4c31e 9479 if (s->condexec_mask) {
396e467c 9480 gen_adc(tmp, tmp2);
49b4c31e
RH
9481 } else {
9482 gen_adc_CC(tmp, tmp, tmp2);
9483 }
99c475ab
FB
9484 break;
9485 case 0x6: /* sbc */
2de68a49 9486 if (s->condexec_mask) {
396e467c 9487 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
9488 } else {
9489 gen_sbc_CC(tmp, tmp, tmp2);
9490 }
99c475ab
FB
9491 break;
9492 case 0x7: /* ror */
9ee6e8bb 9493 if (s->condexec_mask) {
f669df27
AJ
9494 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9495 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 9496 } else {
9ef39277 9497 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 9498 gen_logic_CC(tmp2);
9ee6e8bb 9499 }
99c475ab
FB
9500 break;
9501 case 0x8: /* tst */
396e467c
FN
9502 tcg_gen_and_i32(tmp, tmp, tmp2);
9503 gen_logic_CC(tmp);
99c475ab 9504 rd = 16;
5899f386 9505 break;
99c475ab 9506 case 0x9: /* neg */
9ee6e8bb 9507 if (s->condexec_mask)
396e467c 9508 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 9509 else
72485ec4 9510 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9511 break;
9512 case 0xa: /* cmp */
72485ec4 9513 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
9514 rd = 16;
9515 break;
9516 case 0xb: /* cmn */
72485ec4 9517 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
9518 rd = 16;
9519 break;
9520 case 0xc: /* orr */
396e467c 9521 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 9522 if (!s->condexec_mask)
396e467c 9523 gen_logic_CC(tmp);
99c475ab
FB
9524 break;
9525 case 0xd: /* mul */
7b2919a0 9526 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 9527 if (!s->condexec_mask)
396e467c 9528 gen_logic_CC(tmp);
99c475ab
FB
9529 break;
9530 case 0xe: /* bic */
f669df27 9531 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 9532 if (!s->condexec_mask)
396e467c 9533 gen_logic_CC(tmp);
99c475ab
FB
9534 break;
9535 case 0xf: /* mvn */
396e467c 9536 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 9537 if (!s->condexec_mask)
396e467c 9538 gen_logic_CC(tmp2);
99c475ab 9539 val = 1;
5899f386 9540 rm = rd;
99c475ab
FB
9541 break;
9542 }
9543 if (rd != 16) {
396e467c
FN
9544 if (val) {
9545 store_reg(s, rm, tmp2);
9546 if (op != 0xf)
7d1b0095 9547 tcg_temp_free_i32(tmp);
396e467c
FN
9548 } else {
9549 store_reg(s, rd, tmp);
7d1b0095 9550 tcg_temp_free_i32(tmp2);
396e467c
FN
9551 }
9552 } else {
7d1b0095
PM
9553 tcg_temp_free_i32(tmp);
9554 tcg_temp_free_i32(tmp2);
99c475ab
FB
9555 }
9556 break;
9557
9558 case 5:
9559 /* load/store register offset. */
9560 rd = insn & 7;
9561 rn = (insn >> 3) & 7;
9562 rm = (insn >> 6) & 7;
9563 op = (insn >> 9) & 7;
b0109805 9564 addr = load_reg(s, rn);
b26eefb6 9565 tmp = load_reg(s, rm);
b0109805 9566 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9567 tcg_temp_free_i32(tmp);
99c475ab 9568
c40c8556 9569 if (op < 3) { /* store */
b0109805 9570 tmp = load_reg(s, rd);
c40c8556
PM
9571 } else {
9572 tmp = tcg_temp_new_i32();
9573 }
99c475ab
FB
9574
9575 switch (op) {
9576 case 0: /* str */
08307563 9577 gen_aa32_st32(tmp, addr, IS_USER(s));
99c475ab
FB
9578 break;
9579 case 1: /* strh */
08307563 9580 gen_aa32_st16(tmp, addr, IS_USER(s));
99c475ab
FB
9581 break;
9582 case 2: /* strb */
08307563 9583 gen_aa32_st8(tmp, addr, IS_USER(s));
99c475ab
FB
9584 break;
9585 case 3: /* ldrsb */
08307563 9586 gen_aa32_ld8s(tmp, addr, IS_USER(s));
99c475ab
FB
9587 break;
9588 case 4: /* ldr */
08307563 9589 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9590 break;
9591 case 5: /* ldrh */
08307563 9592 gen_aa32_ld16u(tmp, addr, IS_USER(s));
99c475ab
FB
9593 break;
9594 case 6: /* ldrb */
08307563 9595 gen_aa32_ld8u(tmp, addr, IS_USER(s));
99c475ab
FB
9596 break;
9597 case 7: /* ldrsh */
08307563 9598 gen_aa32_ld16s(tmp, addr, IS_USER(s));
99c475ab
FB
9599 break;
9600 }
c40c8556 9601 if (op >= 3) { /* load */
b0109805 9602 store_reg(s, rd, tmp);
c40c8556
PM
9603 } else {
9604 tcg_temp_free_i32(tmp);
9605 }
7d1b0095 9606 tcg_temp_free_i32(addr);
99c475ab
FB
9607 break;
9608
9609 case 6:
9610 /* load/store word immediate offset */
9611 rd = insn & 7;
9612 rn = (insn >> 3) & 7;
b0109805 9613 addr = load_reg(s, rn);
99c475ab 9614 val = (insn >> 4) & 0x7c;
b0109805 9615 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9616
9617 if (insn & (1 << 11)) {
9618 /* load */
c40c8556 9619 tmp = tcg_temp_new_i32();
08307563 9620 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9621 store_reg(s, rd, tmp);
99c475ab
FB
9622 } else {
9623 /* store */
b0109805 9624 tmp = load_reg(s, rd);
08307563 9625 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9626 tcg_temp_free_i32(tmp);
99c475ab 9627 }
7d1b0095 9628 tcg_temp_free_i32(addr);
99c475ab
FB
9629 break;
9630
9631 case 7:
9632 /* load/store byte immediate offset */
9633 rd = insn & 7;
9634 rn = (insn >> 3) & 7;
b0109805 9635 addr = load_reg(s, rn);
99c475ab 9636 val = (insn >> 6) & 0x1f;
b0109805 9637 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9638
9639 if (insn & (1 << 11)) {
9640 /* load */
c40c8556 9641 tmp = tcg_temp_new_i32();
08307563 9642 gen_aa32_ld8u(tmp, addr, IS_USER(s));
b0109805 9643 store_reg(s, rd, tmp);
99c475ab
FB
9644 } else {
9645 /* store */
b0109805 9646 tmp = load_reg(s, rd);
08307563 9647 gen_aa32_st8(tmp, addr, IS_USER(s));
c40c8556 9648 tcg_temp_free_i32(tmp);
99c475ab 9649 }
7d1b0095 9650 tcg_temp_free_i32(addr);
99c475ab
FB
9651 break;
9652
9653 case 8:
9654 /* load/store halfword immediate offset */
9655 rd = insn & 7;
9656 rn = (insn >> 3) & 7;
b0109805 9657 addr = load_reg(s, rn);
99c475ab 9658 val = (insn >> 5) & 0x3e;
b0109805 9659 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9660
9661 if (insn & (1 << 11)) {
9662 /* load */
c40c8556 9663 tmp = tcg_temp_new_i32();
08307563 9664 gen_aa32_ld16u(tmp, addr, IS_USER(s));
b0109805 9665 store_reg(s, rd, tmp);
99c475ab
FB
9666 } else {
9667 /* store */
b0109805 9668 tmp = load_reg(s, rd);
08307563 9669 gen_aa32_st16(tmp, addr, IS_USER(s));
c40c8556 9670 tcg_temp_free_i32(tmp);
99c475ab 9671 }
7d1b0095 9672 tcg_temp_free_i32(addr);
99c475ab
FB
9673 break;
9674
9675 case 9:
9676 /* load/store from stack */
9677 rd = (insn >> 8) & 7;
b0109805 9678 addr = load_reg(s, 13);
99c475ab 9679 val = (insn & 0xff) * 4;
b0109805 9680 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
9681
9682 if (insn & (1 << 11)) {
9683 /* load */
c40c8556 9684 tmp = tcg_temp_new_i32();
08307563 9685 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9686 store_reg(s, rd, tmp);
99c475ab
FB
9687 } else {
9688 /* store */
b0109805 9689 tmp = load_reg(s, rd);
08307563 9690 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9691 tcg_temp_free_i32(tmp);
99c475ab 9692 }
7d1b0095 9693 tcg_temp_free_i32(addr);
99c475ab
FB
9694 break;
9695
9696 case 10:
9697 /* add to high reg */
9698 rd = (insn >> 8) & 7;
5899f386
FB
9699 if (insn & (1 << 11)) {
9700 /* SP */
5e3f878a 9701 tmp = load_reg(s, 13);
5899f386
FB
9702 } else {
9703 /* PC. bit 1 is ignored. */
7d1b0095 9704 tmp = tcg_temp_new_i32();
5e3f878a 9705 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 9706 }
99c475ab 9707 val = (insn & 0xff) * 4;
5e3f878a
PB
9708 tcg_gen_addi_i32(tmp, tmp, val);
9709 store_reg(s, rd, tmp);
99c475ab
FB
9710 break;
9711
9712 case 11:
9713 /* misc */
9714 op = (insn >> 8) & 0xf;
9715 switch (op) {
9716 case 0:
9717 /* adjust stack pointer */
b26eefb6 9718 tmp = load_reg(s, 13);
99c475ab
FB
9719 val = (insn & 0x7f) * 4;
9720 if (insn & (1 << 7))
6a0d8a1d 9721 val = -(int32_t)val;
b26eefb6
PB
9722 tcg_gen_addi_i32(tmp, tmp, val);
9723 store_reg(s, 13, tmp);
99c475ab
FB
9724 break;
9725
9ee6e8bb
PB
9726 case 2: /* sign/zero extend. */
9727 ARCH(6);
9728 rd = insn & 7;
9729 rm = (insn >> 3) & 7;
b0109805 9730 tmp = load_reg(s, rm);
9ee6e8bb 9731 switch ((insn >> 6) & 3) {
b0109805
PB
9732 case 0: gen_sxth(tmp); break;
9733 case 1: gen_sxtb(tmp); break;
9734 case 2: gen_uxth(tmp); break;
9735 case 3: gen_uxtb(tmp); break;
9ee6e8bb 9736 }
b0109805 9737 store_reg(s, rd, tmp);
9ee6e8bb 9738 break;
99c475ab
FB
9739 case 4: case 5: case 0xc: case 0xd:
9740 /* push/pop */
b0109805 9741 addr = load_reg(s, 13);
5899f386
FB
9742 if (insn & (1 << 8))
9743 offset = 4;
99c475ab 9744 else
5899f386
FB
9745 offset = 0;
9746 for (i = 0; i < 8; i++) {
9747 if (insn & (1 << i))
9748 offset += 4;
9749 }
9750 if ((insn & (1 << 11)) == 0) {
b0109805 9751 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9752 }
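            /* offset is 4 * (number of set bits in the register list, plus
             * one if bit 8 adds lr for push / pc for pop); for push (bit 11
             * clear) the stack pointer is dropped by the full amount first,
             * so the registers are stored ascending from the new SP.
             */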
99c475ab
FB
9753 for (i = 0; i < 8; i++) {
9754 if (insn & (1 << i)) {
9755 if (insn & (1 << 11)) {
9756 /* pop */
c40c8556 9757 tmp = tcg_temp_new_i32();
08307563 9758 gen_aa32_ld32u(tmp, addr, IS_USER(s));
b0109805 9759 store_reg(s, i, tmp);
99c475ab
FB
9760 } else {
9761 /* push */
b0109805 9762 tmp = load_reg(s, i);
08307563 9763 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9764 tcg_temp_free_i32(tmp);
99c475ab 9765 }
5899f386 9766 /* advance to the next address. */
b0109805 9767 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9768 }
9769 }
39d5492a 9770 TCGV_UNUSED_I32(tmp);
99c475ab
FB
9771 if (insn & (1 << 8)) {
9772 if (insn & (1 << 11)) {
9773 /* pop pc */
c40c8556 9774 tmp = tcg_temp_new_i32();
08307563 9775 gen_aa32_ld32u(tmp, addr, IS_USER(s));
99c475ab
FB
9776 /* don't set the pc until the rest of the instruction
9777 has completed */
9778 } else {
9779 /* push lr */
b0109805 9780 tmp = load_reg(s, 14);
08307563 9781 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9782 tcg_temp_free_i32(tmp);
99c475ab 9783 }
b0109805 9784 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 9785 }
5899f386 9786 if ((insn & (1 << 11)) == 0) {
b0109805 9787 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 9788 }
99c475ab 9789 /* write back the new stack pointer */
b0109805 9790 store_reg(s, 13, addr);
99c475ab 9791 /* set the new PC value */
be5e7a76
DES
9792 if ((insn & 0x0900) == 0x0900) {
9793 store_reg_from_load(env, s, 15, tmp);
9794 }
99c475ab
FB
9795 break;
9796
9ee6e8bb
PB
 9797 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9798 rm = insn & 7;
d9ba4830 9799 tmp = load_reg(s, rm);
9ee6e8bb
PB
9800 s->condlabel = gen_new_label();
9801 s->condjmp = 1;
9802 if (insn & (1 << 11))
cb63669a 9803 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 9804 else
cb63669a 9805 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 9806 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9807 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
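            /* offset reassembles i:imm5:'0' from insn bits 9 and 7:3, i.e.
             * an even displacement of 0..126 bytes; the target computed below
             * is the insn address + 4 + offset (s->pc already points past the
             * 16-bit insn).  E.g. i = 0, imm5 = 5 gives offset = 10, so the
             * branch target is the cbz/cbnz's own address + 14.
             */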
9808 val = (uint32_t)s->pc + 2;
9809 val += offset;
9810 gen_jmp(s, val);
9811 break;
9812
9813 case 15: /* IT, nop-hint. */
9814 if ((insn & 0xf) == 0) {
9815 gen_nop_hint(s, (insn >> 4) & 0xf);
9816 break;
9817 }
9818 /* If Then. */
9819 s->condexec_cond = (insn >> 4) & 0xe;
9820 s->condexec_mask = insn & 0x1f;
9821 /* No actual code generated for this insn, just setup state. */
9822 break;
9823
06c949e6 9824 case 0xe: /* bkpt */
be5e7a76 9825 ARCH(5);
bc4a0de0 9826 gen_exception_insn(s, 2, EXCP_BKPT);
06c949e6
PB
9827 break;
9828
9ee6e8bb
PB
9829 case 0xa: /* rev */
9830 ARCH(6);
9831 rn = (insn >> 3) & 0x7;
9832 rd = insn & 0x7;
b0109805 9833 tmp = load_reg(s, rn);
9ee6e8bb 9834 switch ((insn >> 6) & 3) {
66896cb8 9835 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
9836 case 1: gen_rev16(tmp); break;
9837 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
9838 default: goto illegal_op;
9839 }
b0109805 9840 store_reg(s, rd, tmp);
9ee6e8bb
PB
9841 break;
9842
d9e028c1
PM
9843 case 6:
9844 switch ((insn >> 5) & 7) {
9845 case 2:
9846 /* setend */
9847 ARCH(6);
10962fd5
PM
9848 if (((insn >> 3) & 1) != s->bswap_code) {
9849 /* Dynamic endianness switching not implemented. */
e0c270d9 9850 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
9851 goto illegal_op;
9852 }
9ee6e8bb 9853 break;
d9e028c1
PM
9854 case 3:
9855 /* cps */
9856 ARCH(6);
9857 if (IS_USER(s)) {
9858 break;
8984bd2e 9859 }
d9e028c1
PM
9860 if (IS_M(env)) {
9861 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9862 /* FAULTMASK */
9863 if (insn & 1) {
9864 addr = tcg_const_i32(19);
9865 gen_helper_v7m_msr(cpu_env, addr, tmp);
9866 tcg_temp_free_i32(addr);
9867 }
9868 /* PRIMASK */
9869 if (insn & 2) {
9870 addr = tcg_const_i32(16);
9871 gen_helper_v7m_msr(cpu_env, addr, tmp);
9872 tcg_temp_free_i32(addr);
9873 }
9874 tcg_temp_free_i32(tmp);
9875 gen_lookup_tb(s);
9876 } else {
9877 if (insn & (1 << 4)) {
9878 shift = CPSR_A | CPSR_I | CPSR_F;
9879 } else {
9880 shift = 0;
9881 }
9882 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 9883 }
d9e028c1
PM
9884 break;
9885 default:
9886 goto undef;
9ee6e8bb
PB
9887 }
9888 break;
9889
99c475ab
FB
9890 default:
9891 goto undef;
9892 }
9893 break;
9894
9895 case 12:
a7d3970d 9896 {
99c475ab 9897 /* load/store multiple */
39d5492a
PM
9898 TCGv_i32 loaded_var;
9899 TCGV_UNUSED_I32(loaded_var);
99c475ab 9900 rn = (insn >> 8) & 0x7;
b0109805 9901 addr = load_reg(s, rn);
99c475ab
FB
9902 for (i = 0; i < 8; i++) {
9903 if (insn & (1 << i)) {
99c475ab
FB
9904 if (insn & (1 << 11)) {
9905 /* load */
c40c8556 9906 tmp = tcg_temp_new_i32();
08307563 9907 gen_aa32_ld32u(tmp, addr, IS_USER(s));
a7d3970d
PM
9908 if (i == rn) {
9909 loaded_var = tmp;
9910 } else {
9911 store_reg(s, i, tmp);
9912 }
99c475ab
FB
9913 } else {
9914 /* store */
b0109805 9915 tmp = load_reg(s, i);
08307563 9916 gen_aa32_st32(tmp, addr, IS_USER(s));
c40c8556 9917 tcg_temp_free_i32(tmp);
99c475ab 9918 }
5899f386 9919 /* advance to the next address */
b0109805 9920 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
9921 }
9922 }
b0109805 9923 if ((insn & (1 << rn)) == 0) {
a7d3970d 9924 /* base reg not in list: base register writeback */
b0109805
PB
9925 store_reg(s, rn, addr);
9926 } else {
a7d3970d
PM
9927 /* base reg in list: if load, complete it now */
9928 if (insn & (1 << 11)) {
9929 store_reg(s, rn, loaded_var);
9930 }
7d1b0095 9931 tcg_temp_free_i32(addr);
b0109805 9932 }
99c475ab 9933 break;
a7d3970d 9934 }
99c475ab
FB
9935 case 13:
9936 /* conditional branch or swi */
9937 cond = (insn >> 8) & 0xf;
9938 if (cond == 0xe)
9939 goto undef;
9940
9941 if (cond == 0xf) {
9942 /* swi */
eaed129d 9943 gen_set_pc_im(s, s->pc);
9ee6e8bb 9944 s->is_jmp = DISAS_SWI;
99c475ab
FB
9945 break;
9946 }
9947 /* generate a conditional jump to next instruction */
e50e6a20 9948 s->condlabel = gen_new_label();
d9ba4830 9949 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 9950 s->condjmp = 1;
99c475ab
FB
9951
9952 /* jump to the offset */
5899f386 9953 val = (uint32_t)s->pc + 2;
99c475ab 9954 offset = ((int32_t)insn << 24) >> 24;
5899f386 9955 val += offset << 1;
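            /* The shift pair sign-extends imm8, and the branch target is
             * PC (insn address + 4) + imm8 * 2.  E.g. imm8 = 0xfe is -2,
             * giving a branch back to the insn itself; the 11-bit
             * unconditional form in case 14 follows the same pattern.
             */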
8aaca4c0 9956 gen_jmp(s, val);
99c475ab
FB
9957 break;
9958
9959 case 14:
358bf29e 9960 if (insn & (1 << 11)) {
9ee6e8bb
PB
9961 if (disas_thumb2_insn(env, s, insn))
9962 goto undef32;
358bf29e
PB
9963 break;
9964 }
9ee6e8bb 9965 /* unconditional branch */
99c475ab
FB
9966 val = (uint32_t)s->pc;
9967 offset = ((int32_t)insn << 21) >> 21;
9968 val += (offset << 1) + 2;
8aaca4c0 9969 gen_jmp(s, val);
99c475ab
FB
9970 break;
9971
9972 case 15:
9ee6e8bb 9973 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 9974 goto undef32;
9ee6e8bb 9975 break;
99c475ab
FB
9976 }
9977 return;
9ee6e8bb 9978undef32:
bc4a0de0 9979 gen_exception_insn(s, 4, EXCP_UDEF);
9ee6e8bb
PB
9980 return;
9981illegal_op:
99c475ab 9982undef:
bc4a0de0 9983 gen_exception_insn(s, 2, EXCP_UDEF);
99c475ab
FB
9984}
9985
2c0262af
FB
9986/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9987 basic block 'tb'. If search_pc is TRUE, also generate PC
9988 information for each intermediate instruction. */
5639c3f2 9989static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 9990 TranslationBlock *tb,
5639c3f2 9991 bool search_pc)
2c0262af 9992{
ed2803da 9993 CPUState *cs = CPU(cpu);
5639c3f2 9994 CPUARMState *env = &cpu->env;
2c0262af 9995 DisasContext dc1, *dc = &dc1;
a1d1bb31 9996 CPUBreakpoint *bp;
2c0262af
FB
9997 uint16_t *gen_opc_end;
9998 int j, lj;
0fa85d43 9999 target_ulong pc_start;
0a2461fa 10000 target_ulong next_page_start;
2e70f6ef
PB
10001 int num_insns;
10002 int max_insns;
3b46e624 10003
2c0262af 10004 /* generate intermediate code */
0fa85d43 10005 pc_start = tb->pc;
3b46e624 10006
2c0262af
FB
10007 dc->tb = tb;
10008
92414b31 10009 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
10010
10011 dc->is_jmp = DISAS_NEXT;
10012 dc->pc = pc_start;
ed2803da 10013 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 10014 dc->condjmp = 0;
7204ab88 10015 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
d8fd2954 10016 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
98eac7ca
PM
10017 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10018 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
b5ff1b31 10019#if !defined(CONFIG_USER_ONLY)
61f74d6a 10020 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
b5ff1b31 10021#endif
5df8bac1 10022 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
69d1fc22
PM
10023 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10024 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
a7812ae4
PB
10025 cpu_F0s = tcg_temp_new_i32();
10026 cpu_F1s = tcg_temp_new_i32();
10027 cpu_F0d = tcg_temp_new_i64();
10028 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
10029 cpu_V0 = cpu_F0d;
10030 cpu_V1 = cpu_F1d;
e677137d 10031 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 10032 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 10033 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 10034 lj = -1;
2e70f6ef
PB
10035 num_insns = 0;
10036 max_insns = tb->cflags & CF_COUNT_MASK;
10037 if (max_insns == 0)
10038 max_insns = CF_COUNT_MASK;
10039
806f352d 10040 gen_tb_start();
e12ce78d 10041
3849902c
PM
10042 tcg_clear_temp_count();
10043
e12ce78d
PM
10044 /* A note on handling of the condexec (IT) bits:
10045 *
10046 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 10047 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 10048 * (1) if the condexec bits are not already zero then we write
0ecb72a5 10049 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
10050 * to do it at the end of the block. (For example if we don't do this
10051 * it's hard to identify whether we can safely skip writing condexec
10052 * at the end of the TB, which we definitely want to do for the case
10053 * where a TB doesn't do anything with the IT state at all.)
10054 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 10055 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
10056 * This is done both for leaving the TB at the end, and for leaving
10057 * it because of an exception we know will happen, which is done in
10058 * gen_exception_insn(). The latter is necessary because we need to
10059 * leave the TB with the PC/IT state just prior to execution of the
10060 * instruction which caused the exception.
10061 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 10062 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
10063 * This is handled in the same way as restoration of the
10064 * PC in these situations: we will be called again with search_pc=1
10065 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
10066 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10067 * this to restore the condexec bits.
e12ce78d
PM
10068 *
10069 * Note that there are no instructions which can read the condexec
10070 * bits, and none which can write non-static values to them, so
0ecb72a5 10071 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
10072 * middle of a TB.
10073 */
10074
9ee6e8bb
PB
10075 /* Reset the conditional execution bits immediately. This avoids
10076 complications trying to do it at the end of the block. */
98eac7ca 10077 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 10078 {
39d5492a 10079 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 10080 tcg_gen_movi_i32(tmp, 0);
d9ba4830 10081 store_cpu_field(tmp, condexec_bits);
8f01245e 10082 }
2c0262af 10083 do {
fbb4a2e3
PB
10084#ifdef CONFIG_USER_ONLY
10085 /* Intercept jump to the magic kernel page. */
10086 if (dc->pc >= 0xffff0000) {
10087 /* We always get here via a jump, so know we are not in a
10088 conditional execution block. */
10089 gen_exception(EXCP_KERNEL_TRAP);
10090 dc->is_jmp = DISAS_UPDATE;
10091 break;
10092 }
10093#else
9ee6e8bb
PB
10094 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10095 /* We always get here via a jump, so know we are not in a
10096 conditional execution block. */
d9ba4830 10097 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
10098 dc->is_jmp = DISAS_UPDATE;
10099 break;
9ee6e8bb
PB
10100 }
10101#endif
10102
72cf2d4f
BS
10103 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10104 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 10105 if (bp->pc == dc->pc) {
bc4a0de0 10106 gen_exception_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
10107 /* Advance PC so that clearing the breakpoint will
10108 invalidate this TB. */
10109 dc->pc += 2;
10110 goto done_generating;
1fddef4b
FB
10111 }
10112 }
10113 }
2c0262af 10114 if (search_pc) {
92414b31 10115 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
2c0262af
FB
10116 if (lj < j) {
10117 lj++;
10118 while (lj < j)
ab1103de 10119 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 10120 }
25983cad 10121 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 10122 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 10123 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 10124 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 10125 }
e50e6a20 10126
2e70f6ef
PB
10127 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10128 gen_io_start();
10129
fdefe51c 10130 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
10131 tcg_gen_debug_insn_start(dc->pc);
10132 }
10133
7204ab88 10134 if (dc->thumb) {
9ee6e8bb
PB
10135 disas_thumb_insn(env, dc);
10136 if (dc->condexec_mask) {
10137 dc->condexec_cond = (dc->condexec_cond & 0xe)
10138 | ((dc->condexec_mask >> 4) & 1);
10139 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10140 if (dc->condexec_mask == 0) {
10141 dc->condexec_cond = 0;
10142 }
10143 }
10144 } else {
10145 disas_arm_insn(env, dc);
10146 }
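        /* IT-block bookkeeping: case 15 of disas_thumb_insn() packed
         * firstcond (low bit cleared) into condexec_cond and firstcond[0]
         * plus the 4-bit IT mask into the 5-bit condexec_mask.  The advance
         * above runs after the IT insn and after every insn inside the
         * block: the top mask bit becomes the low bit of the next condition
         * and the mask shifts left, so once it hits zero the block is over
         * and following insns are no longer predicated.
         */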
e50e6a20
FB
10147
10148 if (dc->condjmp && !dc->is_jmp) {
10149 gen_set_label(dc->condlabel);
10150 dc->condjmp = 0;
10151 }
3849902c
PM
10152
10153 if (tcg_check_temp_count()) {
0a2461fa
AG
10154 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
10155 dc->pc);
3849902c
PM
10156 }
10157
aaf2d97d 10158 /* Translation stops when a conditional branch is encountered.
e50e6a20 10159 * Otherwise the subsequent code could get translated several times.
b5ff1b31 10160 * Also stop translation when a page boundary is reached. This
bf20dc07 10161 * ensures prefetch aborts occur at the right place. */
2e70f6ef 10162 num_insns ++;
efd7f486 10163 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
ed2803da 10164 !cs->singlestep_enabled &&
1b530a6d 10165 !singlestep &&
2e70f6ef
PB
10166 dc->pc < next_page_start &&
10167 num_insns < max_insns);
10168
10169 if (tb->cflags & CF_LAST_IO) {
10170 if (dc->condjmp) {
10171 /* FIXME: This can theoretically happen with self-modifying
10172 code. */
10173 cpu_abort(env, "IO on conditional branch instruction");
10174 }
10175 gen_io_end();
10176 }
9ee6e8bb 10177
b5ff1b31 10178 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
10179 instruction was a conditional branch or trap, and the PC has
10180 already been written. */
ed2803da 10181 if (unlikely(cs->singlestep_enabled)) {
8aaca4c0 10182 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 10183 if (dc->condjmp) {
9ee6e8bb
PB
10184 gen_set_condexec(dc);
10185 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 10186 gen_exception(EXCP_SWI);
9ee6e8bb 10187 } else {
d9ba4830 10188 gen_exception(EXCP_DEBUG);
9ee6e8bb 10189 }
e50e6a20
FB
10190 gen_set_label(dc->condlabel);
10191 }
10192 if (dc->condjmp || !dc->is_jmp) {
eaed129d 10193 gen_set_pc_im(dc, dc->pc);
e50e6a20 10194 dc->condjmp = 0;
8aaca4c0 10195 }
9ee6e8bb
PB
10196 gen_set_condexec(dc);
10197 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 10198 gen_exception(EXCP_SWI);
9ee6e8bb
PB
10199 } else {
10200 /* FIXME: Single stepping a WFI insn will not halt
10201 the CPU. */
d9ba4830 10202 gen_exception(EXCP_DEBUG);
9ee6e8bb 10203 }
8aaca4c0 10204 } else {
9ee6e8bb
PB
10205 /* While branches must always occur at the end of an IT block,
10206 there are a few other things that can cause us to terminate
65626741 10207 the TB in the middle of an IT block:
9ee6e8bb
PB
10208 - Exception generating instructions (bkpt, swi, undefined).
10209 - Page boundaries.
10210 - Hardware watchpoints.
10211 Hardware breakpoints have already been handled and skip this code.
10212 */
10213 gen_set_condexec(dc);
8aaca4c0 10214 switch(dc->is_jmp) {
8aaca4c0 10215 case DISAS_NEXT:
6e256c93 10216 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
10217 break;
10218 default:
10219 case DISAS_JUMP:
10220 case DISAS_UPDATE:
10221 /* indicate that the hash table must be used to find the next TB */
57fec1fe 10222 tcg_gen_exit_tb(0);
8aaca4c0
FB
10223 break;
10224 case DISAS_TB_JUMP:
10225 /* nothing more to generate */
10226 break;
9ee6e8bb 10227 case DISAS_WFI:
1ce94f81 10228 gen_helper_wfi(cpu_env);
9ee6e8bb
PB
10229 break;
10230 case DISAS_SWI:
d9ba4830 10231 gen_exception(EXCP_SWI);
9ee6e8bb 10232 break;
8aaca4c0 10233 }
e50e6a20
FB
10234 if (dc->condjmp) {
10235 gen_set_label(dc->condlabel);
9ee6e8bb 10236 gen_set_condexec(dc);
6e256c93 10237 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
10238 dc->condjmp = 0;
10239 }
2c0262af 10240 }
2e70f6ef 10241
9ee6e8bb 10242done_generating:
806f352d 10243 gen_tb_end(tb, num_insns);
efd7f486 10244 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
2c0262af
FB
10245
10246#ifdef DEBUG_DISAS
8fec2b8c 10247 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
10248 qemu_log("----------------\n");
10249 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 10250 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 10251 dc->thumb | (dc->bswap_code << 1));
93fcfe39 10252 qemu_log("\n");
2c0262af
FB
10253 }
10254#endif
b5ff1b31 10255 if (search_pc) {
92414b31 10256 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
b5ff1b31
FB
10257 lj++;
10258 while (lj <= j)
ab1103de 10259 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 10260 } else {
2c0262af 10261 tb->size = dc->pc - pc_start;
2e70f6ef 10262 tb->icount = num_insns;
b5ff1b31 10263 }
2c0262af
FB
10264}
10265
0ecb72a5 10266void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 10267{
5639c3f2 10268 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
10269}
10270
0ecb72a5 10271void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 10272{
5639c3f2 10273 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
10274}
10275
b5ff1b31
FB
10276static const char *cpu_mode_names[16] = {
10277 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10278 "???", "???", "???", "und", "???", "???", "???", "sys"
10279};
9ee6e8bb 10280
878096ee
AF
10281void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10282 int flags)
2c0262af 10283{
878096ee
AF
10284 ARMCPU *cpu = ARM_CPU(cs);
10285 CPUARMState *env = &cpu->env;
2c0262af 10286 int i;
b5ff1b31 10287 uint32_t psr;
2c0262af
FB
10288
10289 for(i=0;i<16;i++) {
7fe48483 10290 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 10291 if ((i % 4) == 3)
7fe48483 10292 cpu_fprintf(f, "\n");
2c0262af 10293 else
7fe48483 10294 cpu_fprintf(f, " ");
2c0262af 10295 }
b5ff1b31 10296 psr = cpsr_read(env);
687fa640
TS
10297 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10298 psr,
b5ff1b31
FB
10299 psr & (1 << 31) ? 'N' : '-',
10300 psr & (1 << 30) ? 'Z' : '-',
10301 psr & (1 << 29) ? 'C' : '-',
10302 psr & (1 << 28) ? 'V' : '-',
5fafdf24 10303 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 10304 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 10305
f2617cfc
PM
10306 if (flags & CPU_DUMP_FPU) {
10307 int numvfpregs = 0;
10308 if (arm_feature(env, ARM_FEATURE_VFP)) {
10309 numvfpregs += 16;
10310 }
10311 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10312 numvfpregs += 16;
10313 }
10314 for (i = 0; i < numvfpregs; i++) {
10315 uint64_t v = float64_val(env->vfp.regs[i]);
10316 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10317 i * 2, (uint32_t)v,
10318 i * 2 + 1, (uint32_t)(v >> 32),
10319 i, v);
10320 }
10321 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 10322 }
2c0262af 10323}
a6b025d3 10324
0ecb72a5 10325void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 10326{
25983cad 10327 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
e12ce78d 10328 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
d2856f1a 10329}